Merge changes from linux-2.6 by hand
authorPaul Mackerras <paulus@samba.org>
Sat, 22 Oct 2005 06:51:34 +0000 (16:51 +1000)
committerPaul Mackerras <paulus@samba.org>
Sat, 22 Oct 2005 06:51:34 +0000 (16:51 +1000)
432 files changed:
arch/powerpc/Kconfig [new file with mode: 0644]
arch/powerpc/Kconfig.debug [new file with mode: 0644]
arch/powerpc/Makefile [new file with mode: 0644]
arch/powerpc/kernel/Makefile [new file with mode: 0644]
arch/powerpc/kernel/asm-offsets.c [new file with mode: 0644]
arch/powerpc/kernel/binfmt_elf32.c [moved from arch/ppc64/kernel/binfmt_elf32.c with 94% similarity]
arch/powerpc/kernel/btext.c [new file with mode: 0644]
arch/powerpc/kernel/cputable.c [moved from arch/ppc/kernel/cputable.c with 51% similarity]
arch/powerpc/kernel/entry_32.S [new file with mode: 0644]
arch/powerpc/kernel/entry_64.S [new file with mode: 0644]
arch/powerpc/kernel/fpu.S [moved from arch/ppc/kernel/fpu.S with 69% similarity]
arch/powerpc/kernel/head_32.S [new file with mode: 0644]
arch/powerpc/kernel/head_44x.S [new file with mode: 0644]
arch/powerpc/kernel/head_4xx.S [new file with mode: 0644]
arch/powerpc/kernel/head_64.S [new file with mode: 0644]
arch/powerpc/kernel/head_8xx.S [new file with mode: 0644]
arch/powerpc/kernel/head_fsl_booke.S [new file with mode: 0644]
arch/powerpc/kernel/idle_6xx.S [new file with mode: 0644]
arch/powerpc/kernel/idle_power4.S [moved from arch/ppc64/kernel/idle_power4.S with 95% similarity]
arch/powerpc/kernel/init_task.c [moved from arch/ppc64/kernel/init_task.c with 100% similarity]
arch/powerpc/kernel/lparmap.c [moved from arch/ppc64/kernel/lparmap.c with 100% similarity]
arch/powerpc/kernel/misc_32.S [new file with mode: 0644]
arch/powerpc/kernel/misc_64.S [new file with mode: 0644]
arch/powerpc/kernel/of_device.c [moved from arch/ppc64/kernel/of_device.c with 99% similarity]
arch/powerpc/kernel/pmc.c [moved from arch/ppc64/kernel/pmc.c with 73% similarity]
arch/powerpc/kernel/ppc_ksyms.c [new file with mode: 0644]
arch/powerpc/kernel/process.c [moved from arch/ppc64/kernel/process.c with 61% similarity]
arch/powerpc/kernel/prom.c [new file with mode: 0644]
arch/powerpc/kernel/prom_init.c [new file with mode: 0644]
arch/powerpc/kernel/ptrace.c [moved from arch/ppc/kernel/ptrace.c with 78% similarity]
arch/powerpc/kernel/ptrace32.c [moved from arch/ppc64/kernel/ptrace32.c with 97% similarity]
arch/powerpc/kernel/semaphore.c [new file with mode: 0644]
arch/powerpc/kernel/setup_32.c [new file with mode: 0644]
arch/powerpc/kernel/setup_64.c [new file with mode: 0644]
arch/powerpc/kernel/signal_32.c [moved from arch/ppc64/kernel/signal32.c with 54% similarity]
arch/powerpc/kernel/sys_ppc32.c [moved from arch/ppc64/kernel/sys_ppc32.c with 74% similarity]
arch/powerpc/kernel/syscalls.c [moved from arch/ppc64/kernel/syscalls.c with 54% similarity]
arch/powerpc/kernel/systbl.S [new file with mode: 0644]
arch/powerpc/kernel/time.c [moved from arch/ppc64/kernel/time.c with 72% similarity]
arch/powerpc/kernel/traps.c [new file with mode: 0644]
arch/powerpc/kernel/vecemu.c [moved from arch/ppc/kernel/vecemu.c with 100% similarity]
arch/powerpc/kernel/vector.S [moved from arch/ppc64/kernel/vector.S with 76% similarity]
arch/powerpc/kernel/vmlinux.lds.S [new file with mode: 0644]
arch/powerpc/lib/Makefile [new file with mode: 0644]
arch/powerpc/lib/checksum_32.S [new file with mode: 0644]
arch/powerpc/lib/checksum_64.S [new file with mode: 0644]
arch/powerpc/lib/copy_32.S [new file with mode: 0644]
arch/powerpc/lib/copypage_64.S [new file with mode: 0644]
arch/powerpc/lib/copyuser_64.S [new file with mode: 0644]
arch/powerpc/lib/div64.S [new file with mode: 0644]
arch/powerpc/lib/e2a.c [new file with mode: 0644]
arch/powerpc/lib/locks.c [new file with mode: 0644]
arch/powerpc/lib/mem_64.S [new file with mode: 0644]
arch/powerpc/lib/memcpy_64.S [new file with mode: 0644]
arch/powerpc/lib/rheap.c [new file with mode: 0644]
arch/powerpc/lib/sstep.c [new file with mode: 0644]
arch/powerpc/lib/strcase.c [new file with mode: 0644]
arch/powerpc/lib/string.S [new file with mode: 0644]
arch/powerpc/lib/usercopy_64.c [new file with mode: 0644]
arch/powerpc/mm/44x_mmu.c [new file with mode: 0644]
arch/powerpc/mm/4xx_mmu.c [new file with mode: 0644]
arch/powerpc/mm/Makefile [new file with mode: 0644]
arch/powerpc/mm/fault.c [moved from arch/ppc64/mm/fault.c with 76% similarity]
arch/powerpc/mm/fsl_booke_mmu.c [new file with mode: 0644]
arch/powerpc/mm/hash_low_32.S [new file with mode: 0644]
arch/powerpc/mm/hash_low_64.S [moved from arch/ppc64/mm/hash_low.S with 99% similarity]
arch/powerpc/mm/hash_native_64.c [moved from arch/ppc64/mm/hash_native.c with 96% similarity]
arch/powerpc/mm/hash_utils_64.c [moved from arch/ppc64/mm/hash_utils.c with 89% similarity]
arch/powerpc/mm/hugetlbpage.c [moved from arch/ppc64/mm/hugetlbpage.c with 100% similarity]
arch/powerpc/mm/imalloc.c [moved from arch/ppc64/mm/imalloc.c with 100% similarity]
arch/powerpc/mm/init_32.c [new file with mode: 0644]
arch/powerpc/mm/init_64.c [new file with mode: 0644]
arch/powerpc/mm/lmb.c [moved from arch/ppc64/kernel/lmb.c with 69% similarity]
arch/powerpc/mm/mem.c [new file with mode: 0644]
arch/powerpc/mm/mmap.c [moved from arch/ppc64/mm/mmap.c with 100% similarity]
arch/powerpc/mm/mmu_context_32.c [new file with mode: 0644]
arch/powerpc/mm/mmu_context_64.c [new file with mode: 0644]
arch/powerpc/mm/mmu_decl.h [new file with mode: 0644]
arch/powerpc/mm/numa.c [moved from arch/ppc64/mm/numa.c with 100% similarity]
arch/powerpc/mm/pgtable_32.c [new file with mode: 0644]
arch/powerpc/mm/pgtable_64.c [new file with mode: 0644]
arch/powerpc/mm/ppc_mmu_32.c [new file with mode: 0644]
arch/powerpc/mm/slb.c [moved from arch/ppc64/mm/slb.c with 100% similarity]
arch/powerpc/mm/slb_low.S [moved from arch/ppc64/mm/slb_low.S with 100% similarity]
arch/powerpc/mm/stab.c [moved from arch/ppc64/mm/stab.c with 100% similarity]
arch/powerpc/mm/tlb_32.c [new file with mode: 0644]
arch/powerpc/mm/tlb_64.c [moved from arch/ppc64/mm/tlb.c with 92% similarity]
arch/powerpc/oprofile/Kconfig [moved from arch/ppc/oprofile/Kconfig with 100% similarity]
arch/powerpc/oprofile/Makefile [moved from arch/ppc/oprofile/Makefile with 69% similarity]
arch/powerpc/oprofile/common.c [moved from arch/ppc64/oprofile/common.c with 62% similarity]
arch/powerpc/oprofile/op_model_fsl_booke.c [moved from arch/ppc/oprofile/op_model_fsl_booke.c with 97% similarity]
arch/powerpc/oprofile/op_model_power4.c [moved from arch/ppc64/oprofile/op_model_power4.c with 99% similarity]
arch/powerpc/oprofile/op_model_rs64.c [moved from arch/ppc64/oprofile/op_model_rs64.c with 99% similarity]
arch/powerpc/platforms/4xx/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/4xx/Makefile [new file with mode: 0644]
arch/powerpc/platforms/85xx/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/85xx/Makefile [new file with mode: 0644]
arch/powerpc/platforms/8xx/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/Makefile [new file with mode: 0644]
arch/powerpc/platforms/apus/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/embedded6xx/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/iseries/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/iseries/Makefile [new file with mode: 0644]
arch/powerpc/platforms/iseries/call_hpt.h [moved from include/asm-ppc64/iSeries/HvCallHpt.h with 96% similarity]
arch/powerpc/platforms/iseries/call_pci.h [moved from include/asm-ppc64/iSeries/HvCallPci.h with 56% similarity]
arch/powerpc/platforms/iseries/call_sm.h [moved from include/asm-ppc64/iSeries/HvCallSm.h with 93% similarity]
arch/powerpc/platforms/iseries/htab.c [moved from arch/ppc64/kernel/iSeries_htab.c with 87% similarity]
arch/powerpc/platforms/iseries/hvcall.S [moved from arch/ppc64/kernel/hvCall.S with 95% similarity]
arch/powerpc/platforms/iseries/hvlog.c [moved from arch/ppc64/kernel/HvCall.c with 98% similarity]
arch/powerpc/platforms/iseries/hvlpconfig.c [moved from arch/ppc64/kernel/HvLpConfig.c with 98% similarity]
arch/powerpc/platforms/iseries/iommu.c [moved from arch/ppc64/kernel/iSeries_iommu.c with 82% similarity]
arch/powerpc/platforms/iseries/ipl_parms.h [moved from include/asm-ppc64/iSeries/ItIplParmsReal.h with 96% similarity]
arch/powerpc/platforms/iseries/irq.c [moved from arch/ppc64/kernel/iSeries_irq.c with 98% similarity]
arch/powerpc/platforms/iseries/irq.h [moved from include/asm-ppc64/iSeries/iSeries_irq.h with 64% similarity]
arch/powerpc/platforms/iseries/ksyms.c [new file with mode: 0644]
arch/powerpc/platforms/iseries/lpardata.c [moved from arch/ppc64/kernel/LparData.c with 95% similarity]
arch/powerpc/platforms/iseries/lpevents.c [moved from arch/ppc64/kernel/ItLpQueue.c with 78% similarity]
arch/powerpc/platforms/iseries/main_store.h [moved from include/asm-ppc64/iSeries/IoHriMainStore.h with 97% similarity]
arch/powerpc/platforms/iseries/mf.c [moved from arch/ppc64/kernel/mf.c with 93% similarity]
arch/powerpc/platforms/iseries/misc.S [new file with mode: 0644]
arch/powerpc/platforms/iseries/pci.c [moved from arch/ppc64/kernel/iSeries_pci.c with 86% similarity]
arch/powerpc/platforms/iseries/pci.h [moved from include/asm-ppc64/iSeries/iSeries_pci.h with 55% similarity]
arch/powerpc/platforms/iseries/proc.c [moved from arch/ppc64/kernel/iSeries_proc.c with 91% similarity]
arch/powerpc/platforms/iseries/processor_vpd.h [moved from include/asm-ppc64/iSeries/IoHriProcessorVpd.h with 96% similarity]
arch/powerpc/platforms/iseries/release_data.h [moved from include/asm-ppc64/iSeries/HvReleaseData.h with 95% similarity]
arch/powerpc/platforms/iseries/setup.c [moved from arch/ppc64/kernel/iSeries_setup.c with 74% similarity]
arch/powerpc/platforms/iseries/setup.h [moved from arch/ppc64/kernel/iSeries_setup.h with 90% similarity]
arch/powerpc/platforms/iseries/smp.c [moved from arch/ppc64/kernel/iSeries_smp.c with 73% similarity]
arch/powerpc/platforms/iseries/spcomm_area.h [moved from include/asm-ppc64/iSeries/ItSpCommArea.h with 93% similarity]
arch/powerpc/platforms/iseries/vio.c [moved from arch/ppc64/kernel/iSeries_vio.c with 99% similarity]
arch/powerpc/platforms/iseries/viopath.c [moved from arch/ppc64/kernel/viopath.c with 99% similarity]
arch/powerpc/platforms/iseries/vpd_areas.h [moved from include/asm-ppc64/iSeries/ItVpdAreas.h with 97% similarity]
arch/powerpc/platforms/iseries/vpdinfo.c [moved from arch/ppc64/kernel/iSeries_VpdInfo.c with 95% similarity]
arch/powerpc/platforms/powermac/Makefile [new file with mode: 0644]
arch/powerpc/platforms/powermac/backlight.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/cache.S [new file with mode: 0644]
arch/powerpc/platforms/powermac/cpufreq.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/feature.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/low_i2c.c [moved from arch/ppc64/kernel/pmac_low_i2c.c with 100% similarity]
arch/powerpc/platforms/powermac/nvram.c [moved from arch/ppc64/kernel/pmac_nvram.c with 60% similarity]
arch/powerpc/platforms/powermac/pci.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/pic.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/pic.h [new file with mode: 0644]
arch/powerpc/platforms/powermac/pmac.h [new file with mode: 0644]
arch/powerpc/platforms/powermac/setup.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/sleep.S [new file with mode: 0644]
arch/powerpc/platforms/powermac/smp.c [new file with mode: 0644]
arch/powerpc/platforms/powermac/time.c [new file with mode: 0644]
arch/powerpc/platforms/prep/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/pseries/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/pseries/Makefile [new file with mode: 0644]
arch/powerpc/platforms/pseries/hvCall.S [moved from arch/ppc64/kernel/pSeries_hvCall.S with 100% similarity]
arch/powerpc/platforms/pseries/iommu.c [moved from arch/ppc64/kernel/pSeries_iommu.c with 96% similarity]
arch/powerpc/platforms/pseries/lpar.c [moved from arch/ppc64/kernel/pSeries_lpar.c with 98% similarity]
arch/powerpc/platforms/pseries/nvram.c [moved from arch/ppc64/kernel/pSeries_nvram.c with 100% similarity]
arch/powerpc/platforms/pseries/pci.c [moved from arch/ppc64/kernel/pSeries_pci.c with 99% similarity]
arch/powerpc/platforms/pseries/reconfig.c [moved from arch/ppc64/kernel/pSeries_reconfig.c with 100% similarity]
arch/powerpc/platforms/pseries/setup.c [moved from arch/ppc64/kernel/pSeries_setup.c with 97% similarity]
arch/powerpc/platforms/pseries/smp.c [moved from arch/ppc64/kernel/pSeries_smp.c with 92% similarity]
arch/powerpc/platforms/pseries/vio.c [moved from arch/ppc64/kernel/pSeries_vio.c with 99% similarity]
arch/powerpc/sysdev/Makefile [new file with mode: 0644]
arch/powerpc/sysdev/indirect_pci.c [new file with mode: 0644]
arch/powerpc/sysdev/mpic.c [moved from arch/ppc64/kernel/mpic.c with 96% similarity]
arch/ppc/Kconfig
arch/ppc/Makefile
arch/ppc/kernel/Makefile
arch/ppc/kernel/asm-offsets.c
arch/ppc/kernel/cpu_setup_6xx.S
arch/ppc/kernel/cpu_setup_power4.S
arch/ppc/kernel/entry.S
arch/ppc/kernel/head.S
arch/ppc/kernel/head_44x.S
arch/ppc/kernel/head_4xx.S
arch/ppc/kernel/head_8xx.S
arch/ppc/kernel/head_booke.h
arch/ppc/kernel/head_fsl_booke.S
arch/ppc/kernel/idle.c
arch/ppc/kernel/irq.c
arch/ppc/kernel/l2cr.S
arch/ppc/kernel/misc.S
arch/ppc/kernel/pci.c
arch/ppc/kernel/perfmon.c [deleted file]
arch/ppc/kernel/perfmon_fsl_booke.c
arch/ppc/kernel/ppc_ksyms.c
arch/ppc/kernel/process.c
arch/ppc/kernel/setup.c
arch/ppc/kernel/signal.c [deleted file]
arch/ppc/kernel/smp.c
arch/ppc/kernel/syscalls.c [deleted file]
arch/ppc/kernel/time.c
arch/ppc/kernel/traps.c
arch/ppc/kernel/vector.S [deleted file]
arch/ppc/kernel/vmlinux.lds.S
arch/ppc/lib/string.S
arch/ppc/mm/init.c
arch/ppc/oprofile/common.c [deleted file]
arch/ppc/oprofile/op_impl.h [deleted file]
arch/ppc/platforms/4xx/ebony.c
arch/ppc/platforms/83xx/mpc834x_sys.h
arch/ppc/platforms/85xx/mpc85xx_ads_common.h
arch/ppc/platforms/85xx/stx_gp3.h
arch/ppc/platforms/chestnut.c
arch/ppc/platforms/chrp_pci.c
arch/ppc/platforms/chrp_setup.c
arch/ppc/platforms/chrp_smp.c
arch/ppc/platforms/chrp_time.c
arch/ppc/platforms/ev64360.c
arch/ppc/platforms/gemini_setup.c
arch/ppc/platforms/hdpu.c
arch/ppc/platforms/katana.c
arch/ppc/platforms/lite5200.c
arch/ppc/platforms/lopec.c
arch/ppc/platforms/pal4_setup.c
arch/ppc/platforms/pmac_backlight.c
arch/ppc/platforms/pmac_cpufreq.c
arch/ppc/platforms/pmac_feature.c
arch/ppc/platforms/pmac_nvram.c
arch/ppc/platforms/pmac_pci.c
arch/ppc/platforms/pmac_pic.c
arch/ppc/platforms/pmac_setup.c
arch/ppc/platforms/pmac_sleep.S
arch/ppc/platforms/pmac_smp.c
arch/ppc/platforms/pmac_time.c
arch/ppc/platforms/pplus.c
arch/ppc/platforms/prep_pci.c
arch/ppc/platforms/prep_setup.c
arch/ppc/platforms/radstone_ppc7d.c
arch/ppc/platforms/residual.c
arch/ppc/platforms/sandpoint.c
arch/ppc/syslib/Makefile
arch/ppc/syslib/btext.c
arch/ppc/syslib/gt64260_pic.c
arch/ppc/syslib/ibm440gx_common.c
arch/ppc/syslib/ibm44x_common.c
arch/ppc/syslib/m82xx_pci.c
arch/ppc/syslib/m8xx_setup.c
arch/ppc/syslib/mpc52xx_pci.c
arch/ppc/syslib/mpc83xx_devices.c
arch/ppc/syslib/mv64360_pic.c
arch/ppc/syslib/mv64x60.c
arch/ppc/syslib/mv64x60_dbg.c
arch/ppc/syslib/of_device.c [deleted file]
arch/ppc/syslib/open_pic.c
arch/ppc/syslib/open_pic2.c
arch/ppc/syslib/ppc403_pic.c
arch/ppc/syslib/ppc4xx_pic.c
arch/ppc/syslib/ppc4xx_setup.c
arch/ppc/syslib/ppc83xx_setup.c
arch/ppc/syslib/ppc85xx_setup.c
arch/ppc/syslib/pq2_devices.c
arch/ppc/syslib/prep_nvram.c
arch/ppc/syslib/prom.c
arch/ppc/syslib/xilinx_pic.c
arch/ppc/xmon/start.c
arch/ppc64/Kconfig
arch/ppc64/Makefile
arch/ppc64/kernel/HvLpEvent.c [deleted file]
arch/ppc64/kernel/Makefile
arch/ppc64/kernel/asm-offsets.c
arch/ppc64/kernel/bpa_iommu.c
arch/ppc64/kernel/bpa_setup.c
arch/ppc64/kernel/btext.c
arch/ppc64/kernel/cputable.c [deleted file]
arch/ppc64/kernel/eeh.c
arch/ppc64/kernel/entry.S
arch/ppc64/kernel/head.S
arch/ppc64/kernel/idle.c
arch/ppc64/kernel/kprobes.c
arch/ppc64/kernel/maple_pci.c
arch/ppc64/kernel/maple_setup.c
arch/ppc64/kernel/maple_time.c
arch/ppc64/kernel/misc.S
arch/ppc64/kernel/pci.c
arch/ppc64/kernel/pci_direct_iommu.c
arch/ppc64/kernel/pci_dn.c
arch/ppc64/kernel/pci_iommu.c
arch/ppc64/kernel/pmac.h [deleted file]
arch/ppc64/kernel/pmac_feature.c [deleted file]
arch/ppc64/kernel/pmac_pci.c [deleted file]
arch/ppc64/kernel/pmac_setup.c [deleted file]
arch/ppc64/kernel/pmac_smp.c [deleted file]
arch/ppc64/kernel/pmac_time.c [deleted file]
arch/ppc64/kernel/ppc_ksyms.c
arch/ppc64/kernel/prom.c
arch/ppc64/kernel/ptrace.c [deleted file]
arch/ppc64/kernel/rtas-proc.c
arch/ppc64/kernel/rtas.c
arch/ppc64/kernel/rtas_pci.c
arch/ppc64/kernel/rtc.c
arch/ppc64/kernel/setup.c
arch/ppc64/kernel/smp.c
arch/ppc64/kernel/traps.c [deleted file]
arch/ppc64/kernel/u3_iommu.c
arch/ppc64/kernel/vdso64/sigtramp.S
arch/ppc64/kernel/vecemu.c [deleted file]
arch/ppc64/kernel/vmlinux.lds.S
arch/ppc64/mm/Makefile [deleted file]
arch/ppc64/mm/init.c [deleted file]
arch/ppc64/oprofile/Kconfig [deleted file]
arch/ppc64/oprofile/Makefile [deleted file]
drivers/ide/ppc/pmac.c
drivers/macintosh/ans-lcd.c
drivers/macintosh/mediabay.c
drivers/macintosh/smu.c
drivers/macintosh/via-cuda.c
drivers/macintosh/via-pmu.c
drivers/macintosh/via-pmu68k.c
drivers/net/iseries_veth.c
fs/proc/proc_misc.c
include/asm-powerpc/a.out.h [moved from include/asm-ppc64/a.out.h with 69% similarity]
include/asm-powerpc/atomic.h [moved from include/asm-ppc/atomic.h with 85% similarity]
include/asm-powerpc/auxvec.h [moved from include/asm-ppc64/auxvec.h with 82% similarity]
include/asm-powerpc/backlight.h [moved from include/asm-ppc/backlight.h with 82% similarity]
include/asm-powerpc/bug.h [moved from include/asm-ppc64/bug.h with 62% similarity]
include/asm-powerpc/byteorder.h [moved from include/asm-ppc64/byteorder.h with 90% similarity]
include/asm-powerpc/cputable.h [new file with mode: 0644]
include/asm-powerpc/dbdma.h [moved from include/asm-ppc/dbdma.h with 100% similarity]
include/asm-powerpc/dma.h [moved from include/asm-ppc/dma.h with 86% similarity]
include/asm-powerpc/elf.h [moved from include/asm-ppc64/elf.h with 87% similarity]
include/asm-powerpc/hardirq.h [moved from include/asm-ppc/hardirq.h with 70% similarity]
include/asm-powerpc/heathrow.h [moved from include/asm-ppc/heathrow.h with 100% similarity]
include/asm-powerpc/hw_irq.h [moved from include/asm-ppc64/hw_irq.h with 64% similarity]
include/asm-powerpc/i8259.h [moved from include/asm-ppc/i8259.h with 62% similarity]
include/asm-powerpc/iommu.h [moved from include/asm-ppc64/iommu.h with 75% similarity]
include/asm-powerpc/irq.h [moved from include/asm-ppc/irq.h with 83% similarity]
include/asm-powerpc/kdebug.h [moved from include/asm-ppc64/kdebug.h with 78% similarity]
include/asm-powerpc/keylargo.h [moved from include/asm-ppc/keylargo.h with 100% similarity]
include/asm-powerpc/kmap_types.h [new file with mode: 0644]
include/asm-powerpc/kprobes.h [moved from include/asm-ppc64/kprobes.h with 95% similarity]
include/asm-powerpc/lmb.h [moved from include/asm-ppc64/lmb.h with 97% similarity]
include/asm-powerpc/machdep.h [moved from include/asm-ppc64/machdep.h with 58% similarity]
include/asm-powerpc/macio.h [moved from include/asm-ppc/macio.h with 100% similarity]
include/asm-powerpc/mediabay.h [moved from include/asm-ppc/mediabay.h with 100% similarity]
include/asm-powerpc/mpic.h [moved from arch/ppc64/kernel/mpic.h with 95% similarity]
include/asm-powerpc/of_device.h [moved from include/asm-ppc/of_device.h with 94% similarity]
include/asm-powerpc/ohare.h [moved from include/asm-ppc/ohare.h with 100% similarity]
include/asm-powerpc/oprofile_impl.h [moved from include/asm-ppc64/oprofile_impl.h with 83% similarity]
include/asm-powerpc/pSeries_reconfig.h [moved from include/asm-ppc64/pSeries_reconfig.h with 100% similarity]
include/asm-powerpc/pmac_feature.h [moved from include/asm-ppc/pmac_feature.h with 100% similarity]
include/asm-powerpc/pmac_low_i2c.h [moved from include/asm-ppc/pmac_low_i2c.h with 100% similarity]
include/asm-powerpc/pmc.h [moved from include/asm-ppc64/pmc.h with 66% similarity]
include/asm-powerpc/posix_types.h [moved from include/asm-ppc64/posix_types.h with 88% similarity]
include/asm-powerpc/ppc-pci.h [moved from arch/ppc64/kernel/pci.h with 94% similarity]
include/asm-powerpc/ppc_asm.h [moved from include/asm-ppc/ppc_asm.h with 54% similarity]
include/asm-powerpc/processor.h [moved from include/asm-ppc/processor.h with 51% similarity]
include/asm-powerpc/prom.h [new file with mode: 0644]
include/asm-powerpc/reg.h [moved from include/asm-ppc/reg.h with 68% similarity]
include/asm-powerpc/rtas.h [moved from include/asm-ppc64/rtas.h with 98% similarity]
include/asm-powerpc/rtc.h [new file with mode: 0644]
include/asm-powerpc/rwsem.h [moved from include/asm-ppc64/rwsem.h with 91% similarity]
include/asm-powerpc/seccomp.h [moved from include/asm-ppc64/seccomp.h with 66% similarity]
include/asm-powerpc/sections.h [moved from include/asm-ppc64/sections.h with 51% similarity]
include/asm-powerpc/semaphore.h [moved from include/asm-ppc64/semaphore.h with 95% similarity]
include/asm-powerpc/smu.h [moved from include/asm-ppc64/smu.h with 100% similarity]
include/asm-powerpc/spinlock_types.h [moved from include/asm-ppc64/spinlock_types.h with 80% similarity]
include/asm-powerpc/statfs.h [moved from include/asm-ppc64/statfs.h with 69% similarity]
include/asm-powerpc/synch.h [new file with mode: 0644]
include/asm-powerpc/system.h [new file with mode: 0644]
include/asm-powerpc/thread_info.h [moved from include/asm-ppc64/thread_info.h with 83% similarity]
include/asm-powerpc/time.h [new file with mode: 0644]
include/asm-powerpc/types.h [moved from include/asm-ppc64/types.h with 74% similarity]
include/asm-powerpc/uninorth.h [moved from include/asm-ppc/uninorth.h with 100% similarity]
include/asm-powerpc/unistd.h [moved from include/asm-ppc/unistd.h with 90% similarity]
include/asm-powerpc/vga.h [moved from include/asm-ppc64/vga.h with 74% similarity]
include/asm-powerpc/xmon.h [new file with mode: 0644]
include/asm-ppc/a.out.h [deleted file]
include/asm-ppc/auxvec.h [deleted file]
include/asm-ppc/bug.h [deleted file]
include/asm-ppc/byteorder.h [deleted file]
include/asm-ppc/cache.h
include/asm-ppc/cputable.h [deleted file]
include/asm-ppc/elf.h [deleted file]
include/asm-ppc/hw_irq.h [deleted file]
include/asm-ppc/io.h
include/asm-ppc/kmap_types.h [deleted file]
include/asm-ppc/machdep.h
include/asm-ppc/mmu_context.h
include/asm-ppc/open_pic.h
include/asm-ppc/page.h
include/asm-ppc/pci-bridge.h
include/asm-ppc/pci.h
include/asm-ppc/perfmon.h [deleted file]
include/asm-ppc/posix_types.h [deleted file]
include/asm-ppc/ptrace.h
include/asm-ppc/rwsem.h [deleted file]
include/asm-ppc/seccomp.h [deleted file]
include/asm-ppc/sections.h [deleted file]
include/asm-ppc/semaphore.h [deleted file]
include/asm-ppc/smp.h
include/asm-ppc/spinlock.h
include/asm-ppc/spinlock_types.h [deleted file]
include/asm-ppc/statfs.h [deleted file]
include/asm-ppc/system.h
include/asm-ppc/thread_info.h [deleted file]
include/asm-ppc/types.h [deleted file]
include/asm-ppc/vga.h [deleted file]
include/asm-ppc/xmon.h [deleted file]
include/asm-ppc64/abs_addr.h
include/asm-ppc64/atomic.h [deleted file]
include/asm-ppc64/bitops.h
include/asm-ppc64/btext.h
include/asm-ppc64/cputable.h [deleted file]
include/asm-ppc64/dart.h [new file with mode: 0644]
include/asm-ppc64/dbdma.h [deleted file]
include/asm-ppc64/dma.h [deleted file]
include/asm-ppc64/futex.h
include/asm-ppc64/hardirq.h [deleted file]
include/asm-ppc64/io.h
include/asm-ppc64/irq.h [deleted file]
include/asm-ppc64/keylargo.h [deleted file]
include/asm-ppc64/kmap_types.h [deleted file]
include/asm-ppc64/macio.h [deleted file]
include/asm-ppc64/memory.h [deleted file]
include/asm-ppc64/mmu.h
include/asm-ppc64/of_device.h [deleted file]
include/asm-ppc64/page.h
include/asm-ppc64/pci-bridge.h
include/asm-ppc64/pmac_feature.h [deleted file]
include/asm-ppc64/pmac_low_i2c.h [deleted file]
include/asm-ppc64/ppc32.h
include/asm-ppc64/ppc_asm.h [deleted file]
include/asm-ppc64/processor.h [deleted file]
include/asm-ppc64/prom.h
include/asm-ppc64/smp.h
include/asm-ppc64/system.h
include/asm-ppc64/tce.h [new file with mode: 0644]
include/asm-ppc64/time.h [deleted file]
include/asm-ppc64/tlbflush.h
include/asm-ppc64/udbg.h
include/asm-ppc64/uninorth.h [deleted file]
include/asm-ppc64/unistd.h [deleted file]
kernel/irq/handle.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
new file mode 100644 (file)
index 0000000..27f122e
--- /dev/null
@@ -0,0 +1,864 @@
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+mainmenu "Linux/PowerPC Kernel Configuration"
+
+config PPC64
+       bool "64-bit kernel"
+       default n
+       help
+         This option selects whether a 32-bit or a 64-bit kernel
+         will be built.
+
+config PPC32
+       bool
+       default y if !PPC64
+
+config 64BIT
+       bool
+       default y if PPC64
+
+config PPC_MERGE
+       def_bool y
+
+config MMU
+       bool
+       default y
+
+config UID16
+       bool
+
+config GENERIC_HARDIRQS
+       bool
+       default y
+
+config RWSEM_GENERIC_SPINLOCK
+       bool
+
+config RWSEM_XCHGADD_ALGORITHM
+       bool
+       default y
+
+config GENERIC_CALIBRATE_DELAY
+       bool
+       default y
+
+config PPC
+       bool
+       default y
+
+config EARLY_PRINTK
+       bool
+       default y if PPC64
+
+config COMPAT
+       bool
+       default y if PPC64
+
+config SYSVIPC_COMPAT
+       bool
+       depends on COMPAT && SYSVIPC
+       default y
+
+# All PPC32s use generic nvram driver through ppc_md
+config GENERIC_NVRAM
+       bool
+       default y if PPC32
+
+config SCHED_NO_NO_OMIT_FRAME_POINTER
+       bool
+       default y
+
+config ARCH_MAY_HAVE_PC_FDC
+       bool
+       default y
+
+menu "Processor support"
+choice
+       prompt "Processor Type"
+       depends on PPC32
+       default 6xx
+
+config 6xx
+       bool "6xx/7xx/74xx"
+       select PPC_FPU
+       help
+         There are four families of PowerPC chips supported.  The more common
+         types (601, 603, 604, 740, 750, 7400), the Motorola embedded
+         versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
+         embedded versions (403 and 405) and the high end 64 bit Power
+         processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
+         
+         Unless you are building a kernel for one of the embedded processor
+         systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
+         Note that the kernel runs in 32-bit mode even on 64-bit chips.
+
+config PPC_52xx
+       bool "Freescale 52xx"
+       
+config PPC_82xx
+       bool "Freescale 82xx"
+
+config PPC_83xx
+       bool "Freescale 83xx"
+
+config 40x
+       bool "AMCC 40x"
+
+config 44x
+       bool "AMCC 44x"
+
+config 8xx
+       bool "Freescale 8xx"
+
+config E200
+       bool "Freescale e200"
+
+config E500
+       bool "Freescale e500"
+endchoice
+
+config POWER4_ONLY
+       bool "Optimize for POWER4"
+       depends on PPC64
+       default n
+       ---help---
+         Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
+         The resulting binary will not work on POWER3 or RS64 processors
+         when compiled with binutils 2.15 or later.
+
+config POWER3
+       bool
+       depends on PPC64
+       default y if !POWER4_ONLY
+
+config POWER4
+       depends on PPC64
+       def_bool y
+
+config PPC_FPU
+       bool
+       default y if PPC64
+
+config BOOKE
+       bool
+       depends on E200 || E500
+       default y
+
+config FSL_BOOKE
+       bool
+       depends on E200 || E500
+       default y
+
+config PTE_64BIT
+       bool
+       depends on 44x || E500
+       default y if 44x
+       default y if E500 && PHYS_64BIT
+
+config PHYS_64BIT
+       bool 'Large physical address support' if E500
+       depends on 44x || E500
+       default y if 44x
+       ---help---
+         This option enables kernel support for larger than 32-bit physical
+         addresses.  This feature is not available on all e500 cores.
+
+         If in doubt, say N here.
+
+config ALTIVEC
+       bool "AltiVec Support"
+       depends on 6xx || POWER4
+       ---help---
+         This option enables kernel support for the Altivec extensions to the
+         PowerPC processor. The kernel currently supports saving and restoring
+         altivec registers, and turning on the 'altivec enable' bit so user
+         processes can execute altivec instructions.
+
+         This option is only useful if you have a processor that supports
+         altivec (G4, otherwise known as 74xx series), but does not have
+         any effect on a non-altivec cpu (it does, however add code to the
+         kernel).
+
+         If in doubt, say Y here.
+
+config SPE
+       bool "SPE Support"
+       depends on E200 || E500
+       ---help---
+         This option enables kernel support for the Signal Processing
+         Extensions (SPE) to the PowerPC processor. The kernel currently
+         supports saving and restoring SPE registers, and turning on the
+         'spe enable' bit so user processes can execute SPE instructions.
+
+         This option is only useful if you have a processor that supports
+         SPE (e500, otherwise known as 85xx series), but does not have any
+         effect on a non-spe cpu (it does, however add code to the kernel).
+
+         If in doubt, say Y here.
+
+config PPC_STD_MMU
+       bool
+       depends on 6xx || POWER3 || POWER4 || PPC64
+       default y
+
+config PPC_STD_MMU_32
+       def_bool y
+       depends on PPC_STD_MMU && PPC32
+
+config SMP
+       depends on PPC_STD_MMU
+       bool "Symmetric multi-processing support"
+       ---help---
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, say N. If you have a system with more
+         than one CPU, say Y.  Note that the kernel does not currently
+         support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
+         since they have inadequate hardware support for multiprocessor
+         operation.
+
+         If you say N here, the kernel will run on single and multiprocessor
+         machines, but will use only one CPU of a multiprocessor machine. If
+         you say Y here, the kernel will run on single-processor machines.
+         On a single-processor machine, the kernel will run faster if you say
+         N here.
+
+         If you don't know what to do here, say N.
+
+config NR_CPUS
+       int "Maximum number of CPUs (2-128)"
+       range 2 128
+       depends on SMP
+       default "32" if PPC64
+       default "4"
+
+config NOT_COHERENT_CACHE
+       bool
+       depends on 4xx || 8xx || E200
+       default y
+endmenu
+
+source "init/Kconfig"
+
+menu "Platform support"
+       depends on PPC64 || 6xx
+
+choice
+       prompt "Machine type"
+       default PPC_MULTIPLATFORM
+
+config PPC_MULTIPLATFORM
+       bool "Generic desktop/server/laptop"
+       help
+         Select this option if configuring for an IBM pSeries or
+         RS/6000 machine, an Apple machine, or a PReP, CHRP,
+         Maple or Cell-based machine.
+
+config PPC_ISERIES
+       bool "IBM Legacy iSeries"
+       depends on PPC64
+
+config EMBEDDED6xx
+       bool "Embedded 6xx/7xx/7xxx-based board"
+       depends on PPC32
+
+config APUS
+       bool "Amiga-APUS"
+       depends on PPC32 && BROKEN
+       help
+         Select APUS if configuring for a PowerUP Amiga.
+         More information is available at:
+         <http://linux-apus.sourceforge.net/>.
+endchoice
+
+config PPC_PSERIES
+       depends on PPC_MULTIPLATFORM && PPC64
+       bool "  IBM pSeries & new (POWER5-based) iSeries"
+       default y
+
+config PPC_CHRP
+       bool "  Common Hardware Reference Platform (CHRP) based machines"
+       depends on PPC_MULTIPLATFORM && PPC32
+       default y
+
+config PPC_PMAC
+       bool "  Apple PowerMac based machines"
+       depends on PPC_MULTIPLATFORM
+       default y
+
+config PPC_PMAC64
+       bool
+       depends on PPC_PMAC && POWER4
+       select U3_DART
+       default y
+
+config PPC_PREP
+       bool "  PowerPC Reference Platform (PReP) based machines"
+       depends on PPC_MULTIPLATFORM && PPC32
+       default y
+
+config PPC_MAPLE
+       depends on PPC_MULTIPLATFORM && PPC64
+       bool "  Maple 970FX Evaluation Board"
+       select U3_DART
+       select MPIC_BROKEN_U3
+       default n
+       help
+          This option enables support for the Maple 970FX Evaluation Board.
+         For more information, refer to <http://www.970eval.com>
+
+config PPC_BPA
+       bool "  Broadband Processor Architecture"
+       depends on PPC_MULTIPLATFORM && PPC64
+
+config PPC_OF
+       bool
+       depends on PPC_MULTIPLATFORM    # for now
+       default y
+
+config XICS
+       depends on PPC_PSERIES
+       bool
+       default y
+
+config U3_DART
+       bool 
+       depends on PPC_MULTIPLATFORM && PPC64
+       default n
+
+config MPIC
+       depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
+       bool
+       default y
+
+config MPIC_BROKEN_U3
+       bool
+       depends on PPC_MAPLE
+       default y
+
+config BPA_IIC
+       depends on PPC_BPA
+       bool
+       default y
+
+config IBMVIO
+       depends on PPC_PSERIES || PPC_ISERIES
+       bool
+       default y
+
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ_PMAC
+       bool "Support for Apple PowerBooks"
+       depends on CPU_FREQ && ADB_PMU && PPC32
+       select CPU_FREQ_TABLE
+       help
+         This adds support for frequency switching on Apple PowerBooks,
+         this currently includes some models of iBook & Titanium
+         PowerBook.
+
+config PPC601_SYNC_FIX
+       bool "Workarounds for PPC601 bugs"
+       depends on 6xx && (PPC_PREP || PPC_PMAC)
+       help
+         Some versions of the PPC601 (the first PowerPC chip) have bugs which
+         mean that extra synchronization instructions are required near
+         certain instructions, typically those that make major changes to the
+         CPU state.  These extra instructions reduce performance slightly.
+         If you say N here, these extra instructions will not be included,
+         resulting in a kernel which will run faster but may not run at all
+         on some systems with the PPC601 chip.
+
+         If in doubt, say Y here.
+
+config TAU
+       bool "Thermal Management Support"
+       depends on 6xx
+       help
+         G3 and G4 processors have an on-chip temperature sensor called the
+         'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
+         temperature within 2-4 degrees Celsius. This option shows the current
+         on-die temperature in /proc/cpuinfo if the cpu supports it.
+
+         Unfortunately, on some chip revisions, this sensor is very inaccurate
+         and in some cases, does not work at all, so don't assume the cpu
+         temp is actually what /proc/cpuinfo says it is.
+
+config TAU_INT
+       bool "Interrupt driven TAU driver (DANGEROUS)"
+       depends on TAU
+       ---help---
+         The TAU supports an interrupt driven mode which causes an interrupt
+         whenever the temperature goes out of range. This is the fastest way
+         to get notified the temp has exceeded a range. With this option off,
+         a timer is used to re-check the temperature periodically.
+
+         However, on some cpus it appears that the TAU interrupt hardware
+         is buggy and can cause a situation which would lead unexplained hard
+         lockups.
+
+         Unless you are extending the TAU driver, or enjoy kernel/hardware
+         debugging, leave this option off.
+
+config TAU_AVERAGE
+       bool "Average high and low temp"
+       depends on TAU
+       ---help---
+         The TAU hardware can compare the temperature to an upper and lower
+         bound.  The default behavior is to show both the upper and lower
+         bound in /proc/cpuinfo. If the range is large, the temperature is
+         either changing a lot, or the TAU hardware is broken (likely on some
+         G4's). If the range is small (around 4 degrees), the temperature is
+         relatively stable.  If you say Y here, a single temperature value,
+         halfway between the upper and lower bounds, will be reported in
+         /proc/cpuinfo.
+
+         If in doubt, say N here.
+endmenu
+
+source arch/powerpc/platforms/embedded6xx/Kconfig
+source arch/powerpc/platforms/4xx/Kconfig
+source arch/powerpc/platforms/85xx/Kconfig
+source arch/powerpc/platforms/8xx/Kconfig
+
+menu "Kernel options"
+
+config HIGHMEM
+       bool "High memory support"
+       depends on PPC32
+
+source kernel/Kconfig.hz
+source kernel/Kconfig.preempt
+source "fs/Kconfig.binfmt"
+
+# We optimistically allocate largepages from the VM, so make the limit
+# large enough (16MB). This badly named config option is actually
+# max order + 1
+config FORCE_MAX_ZONEORDER
+       int
+       depends on PPC64
+       default "13"
+
+config MATH_EMULATION
+       bool "Math emulation"
+       depends on 4xx || 8xx || E200 || E500
+       ---help---
+         Some PowerPC chips designed for embedded applications do not have
+         a floating-point unit and therefore do not implement the
+         floating-point instructions in the PowerPC instruction set.  If you
+         say Y here, the kernel will include code to emulate a floating-point
+         unit, which will allow programs that use floating-point
+         instructions to run.
+
+config IOMMU_VMERGE
+       bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
+       depends on EXPERIMENTAL && PPC64
+       default n
+       help
+         Cause IO segments sent to a device for DMA to be merged virtually
+         by the IOMMU when they happen to have been allocated contiguously.
+         This doesn't add pressure to the IOMMU allocator. However, some
+         drivers don't support getting large merged segments coming back
+         from *_map_sg(). Say Y if you know the drivers you are using are
+         properly handling this case.
+
+config HOTPLUG_CPU
+       bool "Support for enabling/disabling CPUs"
+       depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
+       ---help---
+         Say Y here to be able to disable and re-enable individual
+         CPUs at runtime on SMP machines.
+
+         Say N if you are unsure.
+
+config KEXEC
+       bool "kexec system call (EXPERIMENTAL)"
+       depends on PPC_MULTIPLATFORM && EXPERIMENTAL
+       help
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+         but it is independent of the system firmware.   And like a reboot
+         you can start any kernel with it, not just Linux.
+
+         The name comes from the similarity to the exec system call.
+
+         It is an ongoing process to be certain the hardware in a machine
+         is properly shutdown, so do not be surprised if this code does not
+         initially work for you.  It may help to enable device hotplugging
+         support.  As of this writing the exact hardware interface is
+         strongly in flux, so no good recommendation can be made.
+
+config EMBEDDEDBOOT
+       bool
+       depends on 8xx || 8260
+       default y
+
+config PC_KEYBOARD
+       bool "PC PS/2 style Keyboard"
+       depends on 4xx || CPM2
+
+config PPCBUG_NVRAM
+       bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
+       default y if PPC_PREP
+
+config IRQ_ALL_CPUS
+       bool "Distribute interrupts on all CPUs by default"
+       depends on SMP && !MV64360
+       help
+         This option gives the kernel permission to distribute IRQs across
+         multiple CPUs.  Saying N here will route all IRQs to the first
+         CPU.  Generally saying Y is safe, although some problems have been
+         reported with SMP Power Macintoshes with this option enabled.
+
+source "arch/powerpc/platforms/pseries/Kconfig"
+
+config ARCH_SELECT_MEMORY_MODEL
+       def_bool y
+       depends on PPC64
+
+config ARCH_FLATMEM_ENABLE
+       def_bool y
+       depends on PPC64 && !NUMA
+
+config ARCH_DISCONTIGMEM_ENABLE
+       def_bool y
+       depends on SMP && PPC_PSERIES
+
+config ARCH_DISCONTIGMEM_DEFAULT
+       def_bool y
+       depends on ARCH_DISCONTIGMEM_ENABLE
+
+config ARCH_FLATMEM_ENABLE
+       def_bool y
+       depends on PPC64
+
+config ARCH_SPARSEMEM_ENABLE
+       def_bool y
+       depends on ARCH_DISCONTIGMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+       def_bool y
+       depends on NEED_MULTIPLE_NODES
+
+# Some NUMA nodes have memory ranges that span
+# other nodes.  Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.
+#
+# This is a relatively temporary hack that should
+# be able to go away when sparsemem is fully in
+# place
+
+config NODES_SPAN_OTHER_NODES
+       def_bool y
+       depends on NEED_MULTIPLE_NODES
+
+config NUMA
+       bool "NUMA support"
+       default y if DISCONTIGMEM || SPARSEMEM
+
+config SCHED_SMT
+       bool "SMT (Hyperthreading) scheduler support"
+       depends on PPC64 && SMP
+       default n
+       help
+         SMT scheduler support improves the CPU scheduler's decision making
+         when dealing with POWER5 cpus at a cost of slightly increased
+         overhead in some places. If unsure say N here.
+
+config PROC_DEVICETREE
+       bool "Support for device tree in /proc"
+       depends on PROC_FS
+       help
+         This option adds a device-tree directory under /proc which contains
+         an image of the device tree that the kernel copies from Open
+         Firmware or other boot firmware. If unsure, say Y here.
+
+source "arch/powerpc/platforms/prep/Kconfig"
+
+config CMDLINE_BOOL
+       bool "Default bootloader kernel arguments"
+       depends on !PPC_ISERIES
+
+config CMDLINE
+       string "Initial kernel command string"
+       depends on CMDLINE_BOOL
+       default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
+       help
+         On some platforms, there is currently no way for the boot loader to
+         pass arguments to the kernel. For these platforms, you can supply
+         some command-line options at build time by entering them here.  In
+         most cases you will need to specify the root device here.
+
+if !44x || BROKEN
+source kernel/power/Kconfig
+endif
+
+config SECCOMP
+       bool "Enable seccomp to safely compute untrusted bytecode"
+       depends on PROC_FS
+       default y
+       help
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via /proc/<pid>/seccomp, it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
+         If unsure, say Y. Only embedded should say N here.
+
+endmenu
+
+config ISA_DMA_API
+       bool
+       default y
+
+menu "Bus options"
+
+config ISA
+       bool "Support for ISA-bus hardware"
+       depends on PPC_PREP || PPC_CHRP
+       help
+         Find out whether you have ISA slots on your motherboard.  ISA is the
+         name of a bus system, i.e. the way the CPU talks to the other stuff
+         inside your box.  If you have an Apple machine, say N here; if you
+         have an IBM RS/6000 or pSeries machine or a PReP machine, say Y.  If
+         you have an embedded board, consult your board documentation.
+
+config GENERIC_ISA_DMA
+       bool
+       depends on PPC64 || POWER4 || 6xx && !CPM2
+       default y
+
+config EISA
+       bool
+
+config SBUS
+       bool
+
+# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
+config MCA
+       bool
+
+config PCI
+       bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
+       default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
+       default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
+       default PCI_QSPAN if !4xx && !CPM2 && 8xx
+       help
+         Find out whether your system includes a PCI bus. PCI is the name of
+         a bus system, i.e. the way the CPU talks to the other stuff inside
+         your box.  If you say Y here, the kernel will include drivers and
+         infrastructure code to support PCI bus devices.
+
+config PCI_DOMAINS
+       bool
+       default PCI
+
+config MPC83xx_PCI2
+       bool "  Support for 2nd PCI host controller"
+       depends on PCI && MPC834x
+       default y if MPC834x_SYS
+
+config PCI_QSPAN
+       bool "QSpan PCI"
+       depends on !4xx && !CPM2 && 8xx
+       help
+         Say Y here if you have a system based on a Motorola 8xx-series
+         embedded processor with a QSPAN PCI interface, otherwise say N.
+
+config PCI_8260
+       bool
+       depends on PCI && 8260
+       default y
+
+config 8260_PCI9
+       bool "  Enable workaround for MPC826x erratum PCI 9"
+       depends on PCI_8260 && !ADS8272
+       default y
+
+choice
+       prompt "  IDMA channel for PCI 9 workaround"
+       depends on 8260_PCI9
+
+config 8260_PCI9_IDMA1
+       bool "IDMA1"
+
+config 8260_PCI9_IDMA2
+       bool "IDMA2"
+
+config 8260_PCI9_IDMA3
+       bool "IDMA3"
+
+config 8260_PCI9_IDMA4
+       bool "IDMA4"
+
+endchoice
+
+source "drivers/pci/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+menu "Advanced setup"
+       depends on PPC32
+
+config ADVANCED_OPTIONS
+       bool "Prompt for advanced kernel configuration options"
+       help
+         This option will enable prompting for a variety of advanced kernel
+         configuration options.  These options can cause the kernel to not
+         work if they are set incorrectly, but can be used to optimize certain
+         aspects of kernel memory management.
+
+         Unless you know what you are doing, say N here.
+
+comment "Default settings for advanced configuration options are used"
+       depends on !ADVANCED_OPTIONS
+
+config HIGHMEM_START_BOOL
+       bool "Set high memory pool address"
+       depends on ADVANCED_OPTIONS && HIGHMEM
+       help
+         This option allows you to set the base address of the kernel virtual
+         area used to map high memory pages.  This can be useful in
+         optimizing the layout of kernel virtual memory.
+
+         Say N here unless you know what you are doing.
+
+config HIGHMEM_START
+       hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
+       default "0xfe000000"
+
+config LOWMEM_SIZE_BOOL
+       bool "Set maximum low memory"
+       depends on ADVANCED_OPTIONS
+       help
+         This option allows you to set the maximum amount of memory which
+         will be used as "low memory", that is, memory which the kernel can
+         access directly, without having to set up a kernel virtual mapping.
+         This can be useful in optimizing the layout of kernel virtual
+         memory.
+
+         Say N here unless you know what you are doing.
+
+config LOWMEM_SIZE
+       hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
+       default "0x30000000"
+
+config KERNEL_START_BOOL
+       bool "Set custom kernel base address"
+       depends on ADVANCED_OPTIONS
+       help
+         This option allows you to set the kernel virtual address at which
+         the kernel will map low memory (the kernel image will be linked at
+         this address).  This can be useful in optimizing the virtual memory
+         layout of the system.
+
+         Say N here unless you know what you are doing.
+
+config KERNEL_START
+       hex "Virtual address of kernel base" if KERNEL_START_BOOL
+       default "0xc0000000"
+
+config TASK_SIZE_BOOL
+       bool "Set custom user task size"
+       depends on ADVANCED_OPTIONS
+       help
+         This option allows you to set the amount of virtual address space
+         allocated to user tasks.  This can be useful in optimizing the
+         virtual memory layout of the system.
+
+         Say N here unless you know what you are doing.
+
+config TASK_SIZE
+       hex "Size of user task space" if TASK_SIZE_BOOL
+       default "0x80000000"
+
+config CONSISTENT_START_BOOL
+       bool "Set custom consistent memory pool address"
+       depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+       help
+         This option allows you to set the base virtual address
+         of the consistent memory pool.  This pool of virtual
+         memory is used to make consistent memory allocations.
+
+config CONSISTENT_START
+       hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
+       default "0xff100000" if NOT_COHERENT_CACHE
+
+config CONSISTENT_SIZE_BOOL
+       bool "Set custom consistent memory pool size"
+       depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+       help
+         This option allows you to set the size of the
+         consistent memory pool.  This pool of virtual memory
+         is used to make consistent memory allocations.
+
+config CONSISTENT_SIZE
+       hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
+       default "0x00200000" if NOT_COHERENT_CACHE
+
+config BOOT_LOAD_BOOL
+       bool "Set the boot link/load address"
+       depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM
+       help
+         This option allows you to set the initial load address of the zImage
+         or zImage.initrd file.  This can be useful if you are on a board
+         which has a small amount of memory.
+
+         Say N here unless you know what you are doing.
+
+config BOOT_LOAD
+       hex "Link/load address for booting" if BOOT_LOAD_BOOL
+       default "0x00400000" if 40x || 8xx || 8260
+       default "0x01000000" if 44x
+       default "0x00800000"
+
+config PIN_TLB
+       bool "Pinned Kernel TLBs (860 ONLY)"
+       depends on ADVANCED_OPTIONS && 8xx
+endmenu
+
+if PPC64
+config KERNEL_START
+       hex
+       default "0xc000000000000000"
+endif
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+# XXX source "arch/ppc/8xx_io/Kconfig"
+
+# XXX source "arch/ppc/8260_io/Kconfig"
+
+source "arch/powerpc/platforms/iseries/Kconfig"
+
+source "lib/Kconfig"
+
+source "arch/powerpc/oprofile/Kconfig"
+
+source "arch/powerpc/Kconfig.debug"
+
+source "security/Kconfig"
+
+config KEYS_COMPAT
+       bool
+       depends on COMPAT && KEYS
+       default y
+
+source "crypto/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
new file mode 100644 (file)
index 0000000..0baf64e
--- /dev/null
@@ -0,0 +1,128 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config DEBUG_STACKOVERFLOW
+       bool "Check for stack overflows"
+       depends on DEBUG_KERNEL && PPC64
+       help
+         This option will cause messages to be printed if free stack space
+         drops below a certain limit.
+
+config KPROBES
+       bool "Kprobes"
+       depends on DEBUG_KERNEL && PPC64
+       help
+         Kprobes allows you to trap at almost any kernel address and
+         execute a callback function.  register_kprobe() establishes
+         a probepoint and specifies the callback.  Kprobes is useful
+         for kernel debugging, non-intrusive instrumentation and testing.
+         If in doubt, say "N".
+
+config DEBUG_STACK_USAGE
+       bool "Stack utilization instrumentation"
+       depends on DEBUG_KERNEL && PPC64
+       help
+         Enables the display of the minimum amount of free stack which each
+         task has ever had available in the sysrq-T and sysrq-P debug output.
+
+         This option will slow down process creation somewhat.
+
+config DEBUGGER
+       bool "Enable debugger hooks"
+       depends on DEBUG_KERNEL
+       help
+         Include in-kernel hooks for kernel debuggers. Unless you are
+         intending to debug the kernel, say N here.
+
+config KGDB
+       bool "Include kgdb kernel debugger"
+       depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx)
+       select DEBUG_INFO
+       help
+         Include in-kernel hooks for kgdb, the Linux kernel source level
+         debugger.  See <http://kgdb.sourceforge.net/> for more information.
+         Unless you are intending to debug the kernel, say N here.
+
+choice
+       prompt "Serial Port"
+       depends on KGDB
+       default KGDB_TTYS1
+
+config KGDB_TTYS0
+       bool "ttyS0"
+
+config KGDB_TTYS1
+       bool "ttyS1"
+
+config KGDB_TTYS2
+       bool "ttyS2"
+
+config KGDB_TTYS3
+       bool "ttyS3"
+
+endchoice
+
+config KGDB_CONSOLE
+       bool "Enable serial console thru kgdb port"
+       depends on KGDB && 8xx || CPM2
+       help
+         If you enable this, all serial console messages will be sent
+         over the gdb stub.
+         If unsure, say N.
+
+config XMON
+       bool "Include xmon kernel debugger"
+       depends on DEBUGGER && !PPC_ISERIES
+       help
+         Include in-kernel hooks for the xmon kernel monitor/debugger.
+         Unless you are intending to debug the kernel, say N here.
+         Make sure to enable also CONFIG_BOOTX_TEXT on Macs. Otherwise
+         nothing will appear on the screen (xmon writes directly to the
+         framebuffer memory).
+         The cmdline option 'xmon' or 'xmon=early' will drop into xmon
+         very early during boot. 'xmon=on' will just enable the xmon
+         debugger hooks.  'xmon=off' will disable the debugger hooks
+         if CONFIG_XMON_DEFAULT is set.
+
+config XMON_DEFAULT
+       bool "Enable xmon by default"
+       depends on XMON
+       help
+         xmon is normally disabled unless booted with 'xmon=on'.
+         Use 'xmon=off' to disable xmon init during runtime.
+
+config IRQSTACKS
+       bool "Use separate kernel stacks when processing interrupts"
+       depends on PPC64
+       help
+         If you say Y here the kernel will use separate kernel stacks
+         for handling hard and soft interrupts.  This can help avoid
+         overflowing the process kernel stacks.
+
+config BDI_SWITCH
+       bool "Include BDI-2000 user context switcher"
+       depends on DEBUG_KERNEL && PPC32
+       help
+         Include in-kernel support for the Abatron BDI2000 debugger.
+         Unless you are intending to debug the kernel with one of these
+         machines, say N here.
+
+config BOOTX_TEXT
+       bool "Support for early boot text console (BootX or OpenFirmware only)"
+       depends on PPC_OF && !PPC_ISERIES
+       help
+         Say Y here to see progress messages from the boot firmware in text
+         mode. Requires either BootX or Open Firmware.
+
+config SERIAL_TEXT_DEBUG
+       bool "Support for early boot texts over serial port"
+       depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
+               PPC_GEN550 || PPC_MPC52xx
+
+config PPC_OCP
+       bool
+       depends on IBM_OCP || XILINX_OCP
+       default y
+
+endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
new file mode 100644 (file)
index 0000000..dedf121
--- /dev/null
@@ -0,0 +1,219 @@
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Changes for PPC by Gary Thomas
+# Rewritten by Cort Dougan and Paul Mackerras
+#
+
+# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
+KERNELLOAD     := $(CONFIG_KERNEL_START)
+
+HAS_BIARCH     := $(call cc-option-yn, -m32)
+
+ifeq ($(CONFIG_PPC64),y)
+OLDARCH        := ppc64
+SZ     := 64
+
+# Set default 32 bits cross compilers for vdso and boot wrapper
+CROSS32_COMPILE ?=
+
+CROSS32CC              := $(CROSS32_COMPILE)gcc
+CROSS32AS              := $(CROSS32_COMPILE)as
+CROSS32LD              := $(CROSS32_COMPILE)ld
+CROSS32OBJCOPY         := $(CROSS32_COMPILE)objcopy
+
+ifeq ($(HAS_BIARCH),y)
+ifeq ($(CROSS32_COMPILE),)
+CROSS32CC      := $(CC) -m32
+CROSS32AS      := $(AS) -a32
+CROSS32LD      := $(LD) -m elf32ppc
+CROSS32OBJCOPY := $(OBJCOPY)
+endif
+endif
+
+export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
+
+new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
+
+ifeq ($(new_nm),y)
+NM             := $(NM) --synthetic
+endif
+
+else
+OLDARCH        := ppc
+SZ     := 32
+endif
+
+UTS_MACHINE := $(OLDARCH)
+
+ifeq ($(HAS_BIARCH),y)
+override AS    += -a$(SZ)
+override LD    += -m elf$(SZ)ppc
+override CC    += -m$(SZ)
+endif
+
+LDFLAGS_vmlinux        := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
+
+# The -Iarch/$(ARCH)/include is temporary while we are merging
+CPPFLAGS       += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
+AFLAGS         += -Iarch/$(ARCH)
+CFLAGS         += -Iarch/$(ARCH) -msoft-float -pipe
+CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none  -mcall-aixdesc
+CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
+CFLAGS         += $(CFLAGS-y)
+CPP            = $(CC) -E $(CFLAGS)
+# Temporary hack until we have migrated to asm-powerpc
+LINUXINCLUDE    += -Iarch/$(ARCH)/include
+
+CHECKFLAGS     += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
+
+ifeq ($(CONFIG_PPC64),y)
+GCC_VERSION     := $(call cc-version)
+GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
+
+ifeq ($(CONFIG_POWER4_ONLY),y)
+ifeq ($(CONFIG_ALTIVEC),y)
+ifeq ($(GCC_BROKEN_VEC),y)
+       CFLAGS += $(call cc-option,-mcpu=970)
+else
+       CFLAGS += $(call cc-option,-mcpu=power4)
+endif
+else
+       CFLAGS += $(call cc-option,-mcpu=power4)
+endif
+else
+       CFLAGS += $(call cc-option,-mtune=power4)
+endif
+endif
+
+# Enable unit-at-a-time mode when possible. It shrinks the
+# kernel considerably.
+CFLAGS += $(call cc-option,-funit-at-a-time)
+
+ifndef CONFIG_FSL_BOOKE
+CFLAGS         += -mstring
+endif
+
+cpu-as-$(CONFIG_PPC64BRIDGE)   += -Wa,-mppc64bridge
+cpu-as-$(CONFIG_4xx)           += -Wa,-m405
+cpu-as-$(CONFIG_6xx)           += -Wa,-maltivec
+cpu-as-$(CONFIG_POWER4)                += -Wa,-maltivec
+cpu-as-$(CONFIG_E500)          += -Wa,-me500
+cpu-as-$(CONFIG_E200)          += -Wa,-me200
+
+AFLAGS += $(cpu-as-y)
+CFLAGS += $(cpu-as-y)
+
+# Default to the common case.
+KBUILD_DEFCONFIG := common_defconfig
+
+head-y                         := arch/powerpc/kernel/head_32.o
+head-$(CONFIG_PPC64)           := arch/powerpc/kernel/head_64.o
+head-$(CONFIG_8xx)             := arch/powerpc/kernel/head_8xx.o
+head-$(CONFIG_4xx)             := arch/powerpc/kernel/head_4xx.o
+head-$(CONFIG_44x)             := arch/powerpc/kernel/head_44x.o
+head-$(CONFIG_FSL_BOOKE)       := arch/powerpc/kernel/head_fsl_booke.o
+
+head-$(CONFIG_PPC64)           += arch/powerpc/kernel/entry_64.o
+head-$(CONFIG_PPC_FPU)         += arch/powerpc/kernel/fpu.o
+
+core-y                         += arch/powerpc/kernel/ \
+                                  arch/$(OLDARCH)/kernel/ \
+                                  arch/powerpc/mm/ \
+                                  arch/powerpc/lib/ \
+                                  arch/powerpc/sysdev/ \
+                                  arch/powerpc/platforms/
+core-$(CONFIG_MATH_EMULATION)  += arch/ppc/math-emu/
+#core-$(CONFIG_XMON)           += arch/powerpc/xmon/
+core-$(CONFIG_APUS)            += arch/ppc/amiga/
+drivers-$(CONFIG_8xx)          += arch/ppc/8xx_io/
+drivers-$(CONFIG_4xx)          += arch/ppc/4xx_io/
+drivers-$(CONFIG_CPM2)         += arch/ppc/8260_io/
+
+drivers-$(CONFIG_OPROFILE)     += arch/powerpc/oprofile/
+
+defaultimage-$(CONFIG_PPC32)   := uImage zImage
+defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
+defaultimage-$(CONFIG_PPC_PSERIES) := zImage
+KBUILD_IMAGE := $(defaultimage-y)
+all: $(KBUILD_IMAGE)
+
+CPPFLAGS_vmlinux.lds   := -Upowerpc
+
+# All the instructions talk about "make bzImage".
+bzImage: zImage
+
+BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
+
+.PHONY: $(BOOT_TARGETS)
+
+boot := arch/$(OLDARCH)/boot
+
+# urk
+ifeq ($(CONFIG_PPC64),y)
+$(BOOT_TARGETS): vmlinux
+       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+else
+$(BOOT_TARGETS): vmlinux
+       $(Q)$(MAKE) ARCH=ppc $(build)=$(boot) $@
+endif
+
+uImage: vmlinux
+       $(Q)$(MAKE) ARCH=$(OLDARCH) $(build)=$(boot)/images $(boot)/images/$@
+
+define archhelp
+  @echo '* zImage          - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
+  @echo '  uImage          - Create a bootable image for U-Boot / PPCBoot'
+  @echo '  install         - Install kernel using'
+  @echo '                    (your) ~/bin/installkernel or'
+  @echo '                    (distribution) /sbin/installkernel or'
+  @echo '                    install to $$(INSTALL_PATH) and run lilo'
+  @echo '  *_defconfig     - Select default config from arch/$(ARCH)/ppc/configs'
+endef
+
+archclean:
+       $(Q)$(MAKE) $(clean)=$(boot)
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
+
+archprepare: checkbin
+
+# Temporary hack until we have migrated to asm-powerpc
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
+
+# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
+# to stdout and these checks are run even on install targets.
+TOUT   := .tmp_gas_check
+# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
+# instructions.
+# gcc-3.4 and binutils-2.14 are a fatal combination.
+GCC_VERSION    := $(call cc-version)
+
+checkbin:
+       @if test "$(GCC_VERSION)" = "0304" ; then \
+               if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
+                       echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
+                       echo 'correctly with gcc-3.4 and your version of binutils.'; \
+                       echo '*** Please upgrade your binutils or downgrade your gcc'; \
+                       false; \
+               fi ; \
+       fi
+       @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
+               echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
+               echo 'correctly with old versions of binutils.' ; \
+               echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
+               false ; \
+       fi
+
+CLEAN_FILES += $(TOUT)
+
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644 (file)
index 0000000..6b0f176
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Makefile for the linux kernel.
+#
+
+ifeq ($(CONFIG_PPC64),y)
+EXTRA_CFLAGS   += -mno-minimal-toc
+endif
+ifeq ($(CONFIG_PPC32),y)
+CFLAGS_prom_init.o      += -fPIC
+CFLAGS_btext.o         += -fPIC
+endif
+
+obj-y                          := semaphore.o cputable.o ptrace.o syscalls.o \
+                                  signal_32.o pmc.o
+obj-$(CONFIG_PPC64)            += binfmt_elf32.o sys_ppc32.o ptrace32.o
+obj-$(CONFIG_ALTIVEC)          += vecemu.o vector.o
+obj-$(CONFIG_POWER4)           += idle_power4.o
+obj-$(CONFIG_PPC_OF)           += of_device.o
+
+ifeq ($(CONFIG_PPC_MERGE),y)
+
+extra-$(CONFIG_PPC_STD_MMU)    := head_32.o
+extra-$(CONFIG_PPC64)          := head_64.o
+extra-$(CONFIG_40x)            := head_4xx.o
+extra-$(CONFIG_44x)            := head_44x.o
+extra-$(CONFIG_FSL_BOOKE)      := head_fsl_booke.o
+extra-$(CONFIG_8xx)            := head_8xx.o
+extra-$(CONFIG_PPC64)          += entry_64.o
+extra-$(CONFIG_PPC_FPU)                += fpu.o
+extra-y                                += vmlinux.lds
+
+obj-y                          += process.o init_task.o time.o \
+                                  prom.o systbl.o traps.o
+obj-$(CONFIG_PPC32)            += entry_32.o idle_6xx.o setup_32.o misc_32.o
+obj-$(CONFIG_PPC64)            += setup_64.o misc_64.o
+obj-$(CONFIG_PPC_OF)           += prom_init.o
+obj-$(CONFIG_MODULES)          += ppc_ksyms.o
+obj-$(CONFIG_BOOTX_TEXT)       += btext.o
+
+ifeq ($(CONFIG_PPC_ISERIES),y)
+$(obj)/head_64.o: $(obj)/lparmap.s
+AFLAGS_head_64.o += -I$(obj)
+endif
+
+else
+# stuff used from here for ARCH=ppc or ARCH=ppc64
+obj-$(CONFIG_PPC64)            += traps.o process.o init_task.o time.o
+
+fpux-$(CONFIG_PPC32)           += fpu.o
+extra-$(CONFIG_PPC_FPU)                += $(fpux-y)
+
+endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644 (file)
index 0000000..1c83abd
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#ifdef CONFIG_PPC64
+#include <linux/time.h>
+#include <linux/hardirq.h>
+#else
+#include <linux/ptrace.h>
+#include <linux/suspend.h>
+#endif
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/lppaca.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/rtas.h>
+#include <asm/cache.h>
+#include <asm/systemcfg.h>
+#include <asm/compat.h>
+#endif
+
+#define DEFINE(sym, val) \
+       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+       DEFINE(THREAD, offsetof(struct task_struct, thread));
+       DEFINE(MM, offsetof(struct task_struct, mm));
+#ifdef CONFIG_PPC64
+       DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+#else
+       DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
+       DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
+#endif /* CONFIG_PPC64 */
+
+       DEFINE(KSP, offsetof(struct thread_struct, ksp));
+       DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
+       DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
+       DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
+       DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+#ifdef CONFIG_ALTIVEC
+       DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+       DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
+       DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
+       DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+       DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
+#else /* CONFIG_PPC64 */
+       DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
+       DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+       DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
+       DEFINE(PT_PTRACED, PT_PTRACED);
+#endif
+#ifdef CONFIG_SPE
+       DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
+       DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
+       DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
+       DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
+#endif /* CONFIG_SPE */
+#endif /* CONFIG_PPC64 */
+
+       DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+       DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+       DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
+#ifdef CONFIG_PPC32
+       DEFINE(TI_TASK, offsetof(struct thread_info, task));
+       DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
+       DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+       DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
+       DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
+       DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
+       DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
+       DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
+       DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
+       DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
+       DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
+
+       /* paca */
+       DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+       DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
+       DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
+       DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
+       DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
+       DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
+       DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+       DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+       DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
+       DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
+       DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
+       DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+       DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+       DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+       DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_HUGETLB_PAGE
+       DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
+       DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
+#endif /* CONFIG_HUGETLB_PAGE */
+       DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
+       DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
+       DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
+       DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
+       DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
+       DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+       DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
+       DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+
+       DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
+       DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
+       DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
+       DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+
+       /* RTAS */
+       DEFINE(RTASBASE, offsetof(struct rtas_t, base));
+       DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
+#endif /* CONFIG_PPC64 */
+
+       /* Interrupt register frame */
+       DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+#ifndef CONFIG_PPC64
+       DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+#else /* CONFIG_PPC64 */
+       DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+       /* 288 = # of volatile regs, int & fp, for leaf routines */
+       /* which do not stack a frame.  See the PPC64 ABI.       */
+       DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
+       /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
+       DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
+       DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
+#endif /* CONFIG_PPC64 */
+       DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
+       DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
+       DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
+       DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
+       DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
+       DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
+       DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
+       DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
+       DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
+       DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
+       DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
+       DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
+       DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
+       DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
+#ifndef CONFIG_PPC64
+       DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
+       DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
+       DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
+       DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
+       DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
+       DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
+       DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
+       DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
+       DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
+       DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
+       DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
+       DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
+       DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
+       DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
+       DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
+       DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
+       DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
+       DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
+#endif /* CONFIG_PPC64 */
+       /*
+        * Note: these symbols include _ because they overlap with special
+        * register names
+        */
+       DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
+       DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
+       DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
+       DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
+       DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
+       DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
+       DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
+       DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
+       DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
+       DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
+#ifndef CONFIG_PPC64
+       DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
+       /*
+        * The PowerPC 400-class & Book-E processors have neither the DAR
+        * nor the DSISR SPRs. Hence, we overload them to hold the similar
+        * DEAR and ESR SPRs for such processors.  For critical interrupts
+        * we use them to hold SRR0 and SRR1.
+        */
+       DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
+       DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
+       DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
+#else /* CONFIG_PPC64 */
+       DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
+       DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
+
+       /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
+       DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
+       DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
+#endif /* CONFIG_PPC64 */
+
+       DEFINE(CLONE_VM, CLONE_VM);
+       DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
+
+#ifndef CONFIG_PPC64
+       DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+#endif /* ! CONFIG_PPC64 */
+
+       /* About the CPU features table */
+       DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
+       DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
+       DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
+       DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
+       DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+
+#ifndef CONFIG_PPC64
+       DEFINE(pbe_address, offsetof(struct pbe, address));
+       DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+       DEFINE(pbe_next, offsetof(struct pbe, next));
+
+       DEFINE(TASK_SIZE, TASK_SIZE);
+       DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
+#else /* CONFIG_PPC64 */
+       /* systemcfg offsets for use by vdso */
+       DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
+       DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
+       DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
+       DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
+       DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
+       DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
+       DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
+       DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
+       DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
+
+       /* timeval/timezone offsets for use by vdso */
+       DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
+       DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
+       DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
+       DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
+       DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
+       DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
+#endif /* CONFIG_PPC64 */
+       return 0;
+}
similarity index 94%
rename from arch/ppc64/kernel/binfmt_elf32.c
rename to arch/powerpc/kernel/binfmt_elf32.c
index fadc699..8ad6b0f 100644 (file)
@@ -70,9 +70,6 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
        value->tv_sec = jiffies / HZ;
 }
 
-extern void start_thread32(struct pt_regs *, unsigned long, unsigned long);
-#undef start_thread
-#define start_thread start_thread32
 #define init_elf_binfmt init_elf32_binfmt
 
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
new file mode 100644 (file)
index 0000000..bdfba92
--- /dev/null
@@ -0,0 +1,853 @@
+/*
+ * Procedures for drawing on the screen early on in the boot process.
+ *
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/btext.h>
+#include <asm/prom.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/lmb.h>
+#include <asm/processor.h>
+
+#define NO_SCROLL
+
+#ifndef NO_SCROLL
+static void scrollscreen(void);
+#endif
+
+static void draw_byte(unsigned char c, long locX, long locY);
+static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
+static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
+static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
+
+static int g_loc_X;
+static int g_loc_Y;
+static int g_max_loc_X;
+static int g_max_loc_Y;
+
+static int dispDeviceRowBytes;
+static int dispDeviceDepth;
+static int dispDeviceRect[4];
+static unsigned char *dispDeviceBase, *logicalDisplayBase;
+
+unsigned long disp_BAT[2] __initdata = {0, 0};
+
+#define cmapsz (16*256)
+
+static unsigned char vga_font[cmapsz];
+
+int boot_text_mapped;
+int force_printk_to_btext = 0;
+
+#ifdef CONFIG_PPC32
+/* Calc BAT values for mapping the display and store them
+ * in disp_BAT.  Those values are then used from head.S to map
+ * the display during identify_machine() and MMU_Init()
+ *
+ * The display is mapped to virtual address 0xD0000000, rather
+ * than 1:1, because some CHRP machines put the frame buffer
+ * in the region starting at 0xC0000000 (KERNELBASE).
+ * This mapping is temporary and will disappear as soon as the
+ * setup done by MMU_Init() is applied.
+ *
+ * For now, we align the BAT and then map 8Mb on 601 and 16Mb
+ * on other PPCs. This may cause trouble if the framebuffer
+ * is really badly aligned, but I didn't encounter this case
+ * yet.
+ */
+void __init
+btext_prepare_BAT(void)
+{
+       unsigned long vaddr = KERNELBASE + 0x10000000;
+       unsigned long addr;
+       unsigned long lowbits;
+
+       addr = (unsigned long)dispDeviceBase;
+       if (!addr) {
+               boot_text_mapped = 0;
+               return;
+       }
+       if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
+               /* 603, 604, G3, G4, ... */
+               lowbits = addr & ~0xFF000000UL;
+               addr &= 0xFF000000UL;
+               disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
+               disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW); 
+       } else {
+               /* 601 */
+               lowbits = addr & ~0xFF800000UL;
+               addr &= 0xFF800000UL;
+               disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
+               disp_BAT[1] = addr | BL_8M | 0x40;
+       }
+       logicalDisplayBase = (void *) (vaddr + lowbits);
+}
+#endif
+
+/* This function will enable the early boot text when doing OF booting. This
+ * way, xmon output should work too
+ */
+void __init
+btext_setup_display(int width, int height, int depth, int pitch,
+                   unsigned long address)
+{
+       g_loc_X = 0;
+       g_loc_Y = 0;
+       g_max_loc_X = width / 8;
+       g_max_loc_Y = height / 16;
+       logicalDisplayBase = (unsigned char *)address;
+       dispDeviceBase = (unsigned char *)address;
+       dispDeviceRowBytes = pitch;
+       dispDeviceDepth = depth;
+       dispDeviceRect[0] = dispDeviceRect[1] = 0;
+       dispDeviceRect[2] = width;
+       dispDeviceRect[3] = height;
+       boot_text_mapped = 1;
+}
+
+/* Here's a small text engine to use during early boot
+ * or for debugging purposes
+ *
+ * todo:
+ *
+ *  - build some kind of vgacon with it to enable early printk
+ *  - move to a separate file
+ *  - add a few video driver hooks to keep in sync with display
+ *    changes.
+ */
+
+void map_boot_text(void)
+{
+       unsigned long base, offset, size;
+       unsigned char *vbase;
+
+       /* By default, we are no longer mapped */
+       boot_text_mapped = 0;
+       if (dispDeviceBase == 0)
+               return;
+       base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
+       offset = ((unsigned long) dispDeviceBase) - base;
+       size = dispDeviceRowBytes * dispDeviceRect[3] + offset
+               + dispDeviceRect[0];
+       vbase = __ioremap(base, size, _PAGE_NO_CACHE);
+       if (vbase == 0)
+               return;
+       logicalDisplayBase = vbase + offset;
+       boot_text_mapped = 1;
+}
+
+int btext_initialize(struct device_node *np)
+{
+       unsigned int width, height, depth, pitch;
+       unsigned long address = 0;
+       u32 *prop;
+
+       prop = (u32 *)get_property(np, "width", NULL);
+       if (prop == NULL)
+               return -EINVAL;
+       width = *prop;
+       prop = (u32 *)get_property(np, "height", NULL);
+       if (prop == NULL)
+               return -EINVAL;
+       height = *prop;
+       prop = (u32 *)get_property(np, "depth", NULL);
+       if (prop == NULL)
+               return -EINVAL;
+       depth = *prop;
+       pitch = width * ((depth + 7) / 8);
+       prop = (u32 *)get_property(np, "linebytes", NULL);
+       if (prop)
+               pitch = *prop;
+       if (pitch == 1)
+               pitch = 0x1000;
+       prop = (u32 *)get_property(np, "address", NULL);
+       if (prop)
+               address = *prop;
+
+       /* FIXME: Add support for PCI reg properties */
+
+       if (address == 0)
+               return -EINVAL;
+
+       g_loc_X = 0;
+       g_loc_Y = 0;
+       g_max_loc_X = width / 8;
+       g_max_loc_Y = height / 16;
+       logicalDisplayBase = (unsigned char *)address;
+       dispDeviceBase = (unsigned char *)address;
+       dispDeviceRowBytes = pitch;
+       dispDeviceDepth = depth;
+       dispDeviceRect[0] = dispDeviceRect[1] = 0;
+       dispDeviceRect[2] = width;
+       dispDeviceRect[3] = height;
+
+       map_boot_text();
+
+       return 0;
+}
+
+void __init init_boot_display(void)
+{
+       char *name;
+       struct device_node *np = NULL; 
+       int rc = -ENODEV;
+
+       printk("trying to initialize btext ...\n");
+
+       name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
+       if (name != NULL) {
+               np = of_find_node_by_path(name);
+               if (np != NULL) {
+                       if (strcmp(np->type, "display") != 0) {
+                               printk("boot stdout isn't a display !\n");
+                               of_node_put(np);
+                               np = NULL;
+                       }
+               }
+       }
+       if (np)
+               rc = btext_initialize(np);
+       if (rc == 0)
+               return;
+
+       for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
+               if (get_property(np, "linux,opened", NULL)) {
+                       printk("trying %s ...\n", np->full_name);
+                       rc = btext_initialize(np);
+                       printk("result: %d\n", rc);
+               }
+               if (rc == 0)
+                       return;
+       }
+}
+
+/* Calc the base address of a given point (x,y) */
+static unsigned char * calc_base(int x, int y)
+{
+       unsigned char *base;
+
+       base = logicalDisplayBase;
+       if (base == 0)
+               base = dispDeviceBase;
+       base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
+       base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
+       return base;
+}
+
+/* Adjust the display to a new resolution */
+void btext_update_display(unsigned long phys, int width, int height,
+                         int depth, int pitch)
+{
+       if (dispDeviceBase == 0)
+               return;
+
+       /* check it's the same frame buffer (within 256MB) */
+       if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
+               return;
+
+       dispDeviceBase = (__u8 *) phys;
+       dispDeviceRect[0] = 0;
+       dispDeviceRect[1] = 0;
+       dispDeviceRect[2] = width;
+       dispDeviceRect[3] = height;
+       dispDeviceDepth = depth;
+       dispDeviceRowBytes = pitch;
+       if (boot_text_mapped) {
+               iounmap(logicalDisplayBase);
+               boot_text_mapped = 0;
+       }
+       map_boot_text();
+       g_loc_X = 0;
+       g_loc_Y = 0;
+       g_max_loc_X = width / 8;
+       g_max_loc_Y = height / 16;
+}
+EXPORT_SYMBOL(btext_update_display);
+
+void btext_clearscreen(void)
+{
+       unsigned long *base     = (unsigned long *)calc_base(0, 0);
+       unsigned long width     = ((dispDeviceRect[2] - dispDeviceRect[0]) *
+                                       (dispDeviceDepth >> 3)) >> 3;
+       int i,j;
+
+       for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
+       {
+               unsigned long *ptr = base;
+               for(j=width; j; --j)
+                       *(ptr++) = 0;
+               base += (dispDeviceRowBytes >> 3);
+       }
+}
+
+#ifndef NO_SCROLL
+static void scrollscreen(void)
+{
+       unsigned long *src      = (unsigned long *)calc_base(0,16);
+       unsigned long *dst      = (unsigned long *)calc_base(0,0);
+       unsigned long width     = ((dispDeviceRect[2] - dispDeviceRect[0]) *
+                                  (dispDeviceDepth >> 3)) >> 3;
+       int i,j;
+
+       for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
+       {
+               unsigned long *src_ptr = src;
+               unsigned long *dst_ptr = dst;
+               for(j=width; j; --j)
+                       *(dst_ptr++) = *(src_ptr++);
+               src += (dispDeviceRowBytes >> 3);
+               dst += (dispDeviceRowBytes >> 3);
+       }
+       for (i=0; i<16; i++)
+       {
+               unsigned long *dst_ptr = dst;
+               for(j=width; j; --j)
+                       *(dst_ptr++) = 0;
+               dst += (dispDeviceRowBytes >> 3);
+       }
+}
+#endif /* ndef NO_SCROLL */
+
+void btext_drawchar(char c)
+{
+       int cline = 0;
+#ifdef NO_SCROLL
+       int x;
+#endif
+       if (!boot_text_mapped)
+               return;
+
+       switch (c) {
+       case '\b':
+               if (g_loc_X > 0)
+                       --g_loc_X;
+               break;
+       case '\t':
+               g_loc_X = (g_loc_X & -8) + 8;
+               break;
+       case '\r':
+               g_loc_X = 0;
+               break;
+       case '\n':
+               g_loc_X = 0;
+               g_loc_Y++;
+               cline = 1;
+               break;
+       default:
+               draw_byte(c, g_loc_X++, g_loc_Y);
+       }
+       if (g_loc_X >= g_max_loc_X) {
+               g_loc_X = 0;
+               g_loc_Y++;
+               cline = 1;
+       }
+#ifndef NO_SCROLL
+       while (g_loc_Y >= g_max_loc_Y) {
+               scrollscreen();
+               g_loc_Y--;
+       }
+#else
+       /* wrap around from bottom to top of screen so we don't
+          waste time scrolling each line.  -- paulus. */
+       if (g_loc_Y >= g_max_loc_Y)
+               g_loc_Y = 0;
+       if (cline) {
+               for (x = 0; x < g_max_loc_X; ++x)
+                       draw_byte(' ', x, g_loc_Y);
+       }
+#endif
+}
+
+void btext_drawstring(const char *c)
+{
+       if (!boot_text_mapped)
+               return;
+       while (*c)
+               btext_drawchar(*c++);
+}
+
+void btext_drawhex(unsigned long v)
+{
+       char *hex_table = "0123456789abcdef";
+
+       if (!boot_text_mapped)
+               return;
+#ifdef CONFIG_PPC64
+       btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
+#endif
+       btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >>  8) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >>  4) & 0x0000000FUL]);
+       btext_drawchar(hex_table[(v >>  0) & 0x0000000FUL]);
+       btext_drawchar(' ');
+}
+
+static void draw_byte(unsigned char c, long locX, long locY)
+{
+       unsigned char *base     = calc_base(locX << 3, locY << 4);
+       unsigned char *font     = &vga_font[((unsigned int)c) * 16];
+       int rb                  = dispDeviceRowBytes;
+
+       switch(dispDeviceDepth) {
+       case 24:
+       case 32:
+               draw_byte_32(font, (unsigned int *)base, rb);
+               break;
+       case 15:
+       case 16:
+               draw_byte_16(font, (unsigned int *)base, rb);
+               break;
+       case 8:
+               draw_byte_8(font, (unsigned int *)base, rb);
+               break;
+       }
+}
+
+static unsigned int expand_bits_8[16] = {
+       0x00000000,
+       0x000000ff,
+       0x0000ff00,
+       0x0000ffff,
+       0x00ff0000,
+       0x00ff00ff,
+       0x00ffff00,
+       0x00ffffff,
+       0xff000000,
+       0xff0000ff,
+       0xff00ff00,
+       0xff00ffff,
+       0xffff0000,
+       0xffff00ff,
+       0xffffff00,
+       0xffffffff
+};
+
+static unsigned int expand_bits_16[4] = {
+       0x00000000,
+       0x0000ffff,
+       0xffff0000,
+       0xffffffff
+};
+
+
+static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
+{
+       int l, bits;
+       int fg = 0xFFFFFFFFUL;
+       int bg = 0x00000000UL;
+
+       for (l = 0; l < 16; ++l)
+       {
+               bits = *font++;
+               base[0] = (-(bits >> 7) & fg) ^ bg;
+               base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
+               base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
+               base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
+               base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
+               base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
+               base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
+               base[7] = (-(bits & 1) & fg) ^ bg;
+               base = (unsigned int *) ((char *)base + rb);
+       }
+}
+
+static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
+{
+       int l, bits;
+       int fg = 0xFFFFFFFFUL;
+       int bg = 0x00000000UL;
+       unsigned int *eb = (int *)expand_bits_16;
+
+       for (l = 0; l < 16; ++l)
+       {
+               bits = *font++;
+               base[0] = (eb[bits >> 6] & fg) ^ bg;
+               base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
+               base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
+               base[3] = (eb[bits & 3] & fg) ^ bg;
+               base = (unsigned int *) ((char *)base + rb);
+       }
+}
+
+static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+{
+       int l, bits;
+       int fg = 0x0F0F0F0FUL;
+       int bg = 0x00000000UL;
+       unsigned int *eb = (int *)expand_bits_8;
+
+       for (l = 0; l < 16; ++l)
+       {
+               bits = *font++;
+               base[0] = (eb[bits >> 4] & fg) ^ bg;
+               base[1] = (eb[bits & 0xf] & fg) ^ bg;
+               base = (unsigned int *) ((char *)base + rb);
+       }
+}
+
/*
 * 8x16 bitmap console font: 16 bytes per glyph, one byte per scan
 * line, bit 7 = leftmost pixel.  The table holds 4096 bytes, i.e.
 * 256 glyphs; the layout appears to be the classic IBM VGA ROM font
 * in CP437 order (blank, face glyphs, card suits at the start) --
 * NOTE(review): ordering assumed from the glyph shapes, confirm.
 * cmapsz is declared earlier in this file; it presumably equals
 * 256 * 16 -- TODO confirm against that definition.
 */
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};
similarity index 51%
rename from arch/ppc/kernel/cputable.c
rename to arch/powerpc/kernel/cputable.c
index 6b76cf5..1fb80ba 100644 (file)
@@ -1,8 +1,9 @@
 /*
- *  arch/ppc/kernel/cputable.c
- *
  *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
  *
+ *  Modifications for ppc64:
+ *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
  *  as published by the Free Software Foundation; either version
 #include <linux/sched.h>
 #include <linux/threads.h>
 #include <linux/init.h>
-#include <asm/cputable.h>
+#include <linux/module.h>
 
-struct cpu_spec* cur_cpu_spec[NR_CPUS];
+#include <asm/oprofile_impl.h>
+#include <asm/cputable.h>
 
-extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+struct cpu_spec* cur_cpu_spec = NULL;
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(cur_cpu_spec);
+#endif
 
-#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
-                    !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
-                    !defined(CONFIG_BOOKE))
+/* NOTE:
+ * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
+ * the responsibility of the appropriate CPU save/restore functions to
+ * eventually copy these settings over. Those save/restore aren't yet
+ * part of the cputable though. That has to be fixed for both ppc32
+ * and ppc64
+ */
+#ifdef CONFIG_PPC64
+extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
+#else
+extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
+#endif /* CONFIG_PPC32 */
+extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 
 /* This table only contains "desktop" CPUs, it need to be filled with embedded
  * ones as well...
  */
-#define COMMON_PPC     (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
-                        PPC_FEATURE_HAS_MMU)
+#define COMMON_USER            (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
+                                PPC_FEATURE_HAS_MMU)
+#define COMMON_USER_PPC64      (COMMON_USER | PPC_FEATURE_64)
 
-/* We only set the altivec features if the kernel was compiled with altivec
- * support
- */
-#ifdef CONFIG_ALTIVEC
-#define CPU_FTR_ALTIVEC_COMP           CPU_FTR_ALTIVEC
-#define PPC_FEATURE_ALTIVEC_COMP       PPC_FEATURE_HAS_ALTIVEC
-#else
-#define CPU_FTR_ALTIVEC_COMP           0
-#define PPC_FEATURE_ALTIVEC_COMP               0
-#endif
 
 /* We only set the spe features if the kernel was compiled with
  * spe support
  */
 #ifdef CONFIG_SPE
-#define PPC_FEATURE_SPE_COMP           PPC_FEATURE_HAS_SPE
+#define PPC_FEATURE_SPE_COMP   PPC_FEATURE_HAS_SPE
 #else
-#define PPC_FEATURE_SPE_COMP           0
+#define PPC_FEATURE_SPE_COMP   0
 #endif
 
-/* We need to mark all pages as being coherent if we're SMP or we
- * have a 74[45]x and an MPC107 host bridge.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
-#define CPU_FTR_COMMON                  CPU_FTR_NEED_COHERENT
-#else
-#define CPU_FTR_COMMON                  0
+struct cpu_spec        cpu_specs[] = {
+#ifdef CONFIG_PPC64
+       {       /* Power3 */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00400000,
+               .cpu_name               = "POWER3 (630)",
+               .cpu_features           = CPU_FTRS_POWER3,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power3,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/power3",
+               .oprofile_model         = &op_model_rs64,
 #endif
-
-/* The powersave features NAP & DOZE seems to confuse BDI when
-   debugging. So if a BDI is used, disable theses
- */
-#ifndef CONFIG_BDI_SWITCH
-#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
-#define CPU_FTR_MAYBE_CAN_NAP  CPU_FTR_CAN_NAP
+       },
+       {       /* Power3+ */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00410000,
+               .cpu_name               = "POWER3 (630+)",
+               .cpu_features           = CPU_FTRS_POWER3,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power3,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/power3",
+               .oprofile_model         = &op_model_rs64,
+#endif
+       },
+       {       /* Northstar */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00330000,
+               .cpu_name               = "RS64-II (northstar)",
+               .cpu_features           = CPU_FTRS_RS64,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power3,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/rs64",
+               .oprofile_model         = &op_model_rs64,
+#endif
+       },
+       {       /* Pulsar */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00340000,
+               .cpu_name               = "RS64-III (pulsar)",
+               .cpu_features           = CPU_FTRS_RS64,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power3,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/rs64",
+               .oprofile_model         = &op_model_rs64,
+#endif
+       },
+       {       /* I-star */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00360000,
+               .cpu_name               = "RS64-III (icestar)",
+               .cpu_features           = CPU_FTRS_RS64,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power3,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/rs64",
+               .oprofile_model         = &op_model_rs64,
+#endif
+       },
+       {       /* S-star */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00370000,
+               .cpu_name               = "RS64-IV (sstar)",
+               .cpu_features           = CPU_FTRS_RS64,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power3,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/rs64",
+               .oprofile_model         = &op_model_rs64,
+#endif
+       },
+       {       /* Power4 */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00350000,
+               .cpu_name               = "POWER4 (gp)",
+               .cpu_features           = CPU_FTRS_POWER4,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power4,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/power4",
+               .oprofile_model         = &op_model_rs64,
+#endif
+       },
+       {       /* Power4+ */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00380000,
+               .cpu_name               = "POWER4+ (gq)",
+               .cpu_features           = CPU_FTRS_POWER4,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_power4,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/power4",
+               .oprofile_model         = &op_model_power4,
+#endif
+       },
+       {       /* PPC970 */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00390000,
+               .cpu_name               = "PPC970",
+               .cpu_features           = CPU_FTRS_PPC970,
+               .cpu_user_features      = COMMON_USER_PPC64 |
+                       PPC_FEATURE_HAS_ALTIVEC_COMP,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_ppc970,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/970",
+               .oprofile_model         = &op_model_power4,
+#endif
+       },
+#endif /* CONFIG_PPC64 */
+#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
+       {       /* PPC970FX */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x003c0000,
+               .cpu_name               = "PPC970FX",
+#ifdef CONFIG_PPC32
+               .cpu_features           = CPU_FTRS_970_32,
 #else
-#define CPU_FTR_MAYBE_CAN_DOZE 0
-#define CPU_FTR_MAYBE_CAN_NAP  0
+               .cpu_features           = CPU_FTRS_PPC970,
 #endif
-
-struct cpu_spec        cpu_specs[] = {
+               .cpu_user_features      = COMMON_USER_PPC64 |
+                       PPC_FEATURE_HAS_ALTIVEC_COMP,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 8,
+               .cpu_setup              = __setup_cpu_ppc970,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/970",
+               .oprofile_model         = &op_model_power4,
+#endif
+       },
+#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
+#ifdef CONFIG_PPC64
+       {       /* PPC970MP */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00440000,
+               .cpu_name               = "PPC970MP",
+               .cpu_features           = CPU_FTRS_PPC970,
+               .cpu_user_features      = COMMON_USER_PPC64 |
+                       PPC_FEATURE_HAS_ALTIVEC_COMP,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .cpu_setup              = __setup_cpu_ppc970,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/970",
+               .oprofile_model         = &op_model_power4,
+#endif
+       },
+       {       /* Power5 */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x003a0000,
+               .cpu_name               = "POWER5 (gr)",
+               .cpu_features           = CPU_FTRS_POWER5,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .cpu_setup              = __setup_cpu_power4,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/power5",
+               .oprofile_model         = &op_model_power4,
+#endif
+       },
+       {       /* Power5 */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x003b0000,
+               .cpu_name               = "POWER5 (gs)",
+               .cpu_features           = CPU_FTRS_POWER5,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .cpu_setup              = __setup_cpu_power4,
+#ifdef CONFIG_OPROFILE
+               .oprofile_cpu_type      = "ppc64/power5",
+               .oprofile_model         = &op_model_power4,
+#endif
+       },
+       {       /* BE DD1.x */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00700000,
+               .cpu_name               = "Cell Broadband Engine",
+               .cpu_features           = CPU_FTRS_CELL,
+               .cpu_user_features      = COMMON_USER_PPC64 |
+                       PPC_FEATURE_HAS_ALTIVEC_COMP,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .cpu_setup              = __setup_cpu_be,
+       },
+       {       /* default match */
+               .pvr_mask               = 0x00000000,
+               .pvr_value              = 0x00000000,
+               .cpu_name               = "POWER4 (compatible)",
+               .cpu_features           = CPU_FTRS_COMPATIBLE,
+               .cpu_user_features      = COMMON_USER_PPC64,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .cpu_setup              = __setup_cpu_power4,
+       }
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC32
 #if CLASSIC_PPC
-       {       /* 601 */
+       {       /* 601 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00010000,
                .cpu_name               = "601",
-               .cpu_features           = CPU_FTR_COMMON | CPU_FTR_601 |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_601_INSTR |
+               .cpu_features           = CPU_FTRS_PPC601,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_601_INSTR |
                        PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_601
        },
        {       /* 603 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00030000,
                .cpu_name               = "603",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_603,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .cpu_setup              = __setup_cpu_603
@@ -112,10 +321,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00060000,
                .cpu_name               = "603e",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_603,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .cpu_setup              = __setup_cpu_603
@@ -124,10 +331,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00070000,
                .cpu_name               = "603ev",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_603,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .cpu_setup              = __setup_cpu_603
@@ -136,10 +341,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00040000,
                .cpu_name               = "604",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_604,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 2,
@@ -149,10 +352,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xfffff000,
                .pvr_value              = 0x00090000,
                .cpu_name               = "604e",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_604,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -162,10 +363,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00090000,
                .cpu_name               = "604r",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_604,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -175,10 +374,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x000a0000,
                .cpu_name               = "604ev",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_604,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -188,11 +385,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x00084202,
                .cpu_name               = "740/750",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_740_NOTAU,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -202,11 +396,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xfffffff0,
                .pvr_value              = 0x00080100,
                .cpu_name               = "750CX",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -216,11 +407,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xfffffff0,
                .pvr_value              = 0x00082200,
                .cpu_name               = "750CX",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -230,11 +418,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xfffffff0,
                .pvr_value              = 0x00082210,
                .cpu_name               = "750CXe",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -244,11 +429,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x00083214,
                .cpu_name               = "750CXe",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -258,11 +440,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xfffff000,
                .pvr_value              = 0x00083000,
                .cpu_name               = "745/755",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -272,12 +451,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x70000100,
                .cpu_name               = "750FX",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
-                       CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750FX1,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -287,12 +462,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x70000200,
                .cpu_name               = "750FX",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
-                       CPU_FTR_NO_DPM,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750FX2,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -302,12 +473,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x70000000,
                .cpu_name               = "750FX",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
-                       CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750FX,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -317,12 +484,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x70020000,
                .cpu_name               = "750GX",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
-                       CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX |
-                       CPU_FTR_HAS_HIGH_BATS,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_750GX,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -332,11 +495,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00080000,
                .cpu_name               = "740/750",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_740,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -346,11 +506,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x000c1101,
                .cpu_name               = "7400 (1.1)",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7400_NOTAU,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -360,12 +517,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x000c0000,
                .cpu_name               = "7400",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7400,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -375,12 +528,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x800c0000,
                .cpu_name               = "7410",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7400,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
@@ -390,12 +539,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x80000200,
                .cpu_name               = "7450",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7450_20,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -405,14 +550,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x80000201,
                .cpu_name               = "7450",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
-                       CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7450_21,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -422,13 +561,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80000000,
                .cpu_name               = "7450",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7450_23,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -438,12 +572,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x80010100,
                .cpu_name               = "7455",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7455_1,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -453,14 +583,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x80010200,
                .cpu_name               = "7455",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
-                       CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7455_20,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -470,14 +594,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80010000,
                .cpu_name               = "7455",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-                       CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7455,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -487,14 +605,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x80020100,
                .cpu_name               = "7447/7457",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-                       CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7447_10,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -504,14 +616,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffffffff,
                .pvr_value              = 0x80020101,
                .cpu_name               = "7447/7457",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-                       CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7447_10,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -521,14 +627,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80020000,
                .cpu_name               = "7447/7457",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-                       CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-                       CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-                       CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7447,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -538,13 +638,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80030000,
                .cpu_name               = "7447A",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
-                       CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7447A,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -554,13 +649,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80040000,
                .cpu_name               = "7448",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
-                       CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+               .cpu_features           = CPU_FTRS_7447A,
+               .cpu_user_features      = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 6,
@@ -570,10 +660,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0x7fff0000,
                .pvr_value              = 0x00810000,
                .cpu_name               = "82xx",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-                       CPU_FTR_USE_TB,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_82XX,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .cpu_setup              = __setup_cpu_603
@@ -582,10 +670,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0x7fff0000,
                .pvr_value              = 0x00820000,
                .cpu_name               = "G2_LE",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_G2_LE,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .cpu_setup              = __setup_cpu_603
@@ -594,10 +680,8 @@ struct cpu_spec    cpu_specs[] = {
                .pvr_mask               = 0x7fff0000,
                .pvr_value              = 0x00830000,
                .cpu_name               = "e300",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
-                       CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_E300,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .cpu_setup              = __setup_cpu_603
@@ -606,114 +690,12 @@ struct cpu_spec  cpu_specs[] = {
                .pvr_mask               = 0x00000000,
                .pvr_value              = 0x00000000,
                .cpu_name               = "(generic PPC)",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC,
+               .cpu_features           = CPU_FTRS_CLASSIC32,
+               .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_generic
        },
 #endif /* CLASSIC_PPC */
-#ifdef CONFIG_PPC64BRIDGE
-       {       /* Power3 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00400000,
-               .cpu_name               = "Power3 (630)",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3
-       },
-       {       /* Power3+ */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00410000,
-               .cpu_name               = "Power3 (630+)",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3
-       },
-       {       /* I-star */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00360000,
-               .cpu_name               = "I-star",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3
-       },
-       {       /* S-star */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00370000,
-               .cpu_name               = "S-star",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3
-       },
-#endif /* CONFIG_PPC64BRIDGE */
-#ifdef CONFIG_POWER4
-       {       /* Power4 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00350000,
-               .cpu_name               = "Power4",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power4
-       },
-       {       /* PPC970 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00390000,
-               .cpu_name               = "PPC970",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64 |
-                       PPC_FEATURE_ALTIVEC_COMP,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_ppc970
-       },
-       {       /* PPC970FX */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x003c0000,
-               .cpu_name               = "PPC970FX",
-               .cpu_features           = CPU_FTR_COMMON |
-                       CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-                       CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
-               .cpu_user_features      = COMMON_PPC | PPC_FEATURE_64 |
-                       PPC_FEATURE_ALTIVEC_COMP,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_ppc970
-       },
-#endif /* CONFIG_POWER4 */
 #ifdef CONFIG_8xx
        {       /* 8xx */
                .pvr_mask               = 0xffff0000,
@@ -721,8 +703,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_name               = "8xx",
                /* CPU_FTR_MAYBE_CAN_DOZE is possible,
                 * if the 8xx code is there.... */
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_8XX,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
@@ -733,8 +714,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x00200200,
                .cpu_name               = "403GC",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
@@ -743,8 +723,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x00201400,
                .cpu_name               = "403GCX",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
                .icache_bsize           = 16,
@@ -754,8 +733,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00200000,
                .cpu_name               = "403G ??",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
@@ -764,8 +742,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x40110000,
                .cpu_name               = "405GP",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -775,8 +752,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x40130000,
                .cpu_name               = "STB03xxx",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -786,8 +762,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x41810000,
                .cpu_name               = "STB04xxx",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -797,8 +772,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x41610000,
                .cpu_name               = "NP405L",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -808,8 +782,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x40B10000,
                .cpu_name               = "NP4GS3",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -819,8 +792,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x41410000,
                .cpu_name               = "NP405H",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -830,8 +802,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x50910000,
                .cpu_name               = "405GPr",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -841,8 +812,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x51510000,
                .cpu_name               = "STBx25xx",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -852,8 +822,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x41F10000,
                .cpu_name               = "405LP",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -862,8 +831,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x20010000,
                .cpu_name               = "Virtex-II Pro",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -873,8 +841,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x51210000,
                .cpu_name               = "405EP",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_40X,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
@@ -887,9 +854,8 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x40000850,
                .cpu_name               = "440EP Rev. A",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
-               .cpu_user_features      = COMMON_PPC, /* 440EP has an FPU */
+               .cpu_features           = CPU_FTRS_44X,
+               .cpu_user_features      = COMMON_USER, /* 440EP has an FPU */
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
        },
@@ -897,28 +863,25 @@ struct cpu_spec   cpu_specs[] = {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x400008d3,
                .cpu_name               = "440EP Rev. B",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
-               .cpu_user_features      = COMMON_PPC, /* 440EP has an FPU */
+               .cpu_features           = CPU_FTRS_44X,
+               .cpu_user_features      = COMMON_USER, /* 440EP has an FPU */
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
        },
-       {       /* 440GP Rev. B */
+       {       /* 440GP Rev. B */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x40000440,
                .cpu_name               = "440GP Rev. B",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
        },
-       {       /* 440GP Rev. C */
+       {       /* 440GP Rev. C */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x40000481,
                .cpu_name               = "440GP Rev. C",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -927,8 +890,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000850,
                .cpu_name               = "440GX Rev. A",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -937,8 +899,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000851,
                .cpu_name               = "440GX Rev. B",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -947,8 +908,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000892,
                .cpu_name               = "440GX Rev. C",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -957,8 +917,7 @@ struct cpu_spec     cpu_specs[] = {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000894,
                .cpu_name               = "440GX Rev. F",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -967,44 +926,42 @@ struct cpu_spec   cpu_specs[] = {
                .pvr_mask               = 0xff000fff,
                .pvr_value              = 0x53000891,
                .cpu_name               = "440SP Rev. A",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
        },
 #endif /* CONFIG_44x */
 #ifdef CONFIG_FSL_BOOKE
-       {       /* e200z5 */
+       {       /* e200z5 */
                .pvr_mask               = 0xfff00000,
                .pvr_value              = 0x81000000,
                .cpu_name               = "e200z5",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-               .cpu_features           = CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_E200,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
                        PPC_FEATURE_UNIFIED_CACHE,
                .dcache_bsize           = 32,
        },
-       {       /* e200z6 */
+       {       /* e200z6 */
                .pvr_mask               = 0xfff00000,
                .pvr_value              = 0x81100000,
                .cpu_name               = "e200z6",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-               .cpu_features           = CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_E200,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
                        PPC_FEATURE_HAS_EFP_SINGLE |
                        PPC_FEATURE_UNIFIED_CACHE,
                .dcache_bsize           = 32,
        },
-       {       /* e500 */
+       {       /* e500 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80200000,
                .cpu_name               = "e500",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_E500,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
                        PPC_FEATURE_HAS_EFP_SINGLE,
@@ -1012,13 +969,12 @@ struct cpu_spec  cpu_specs[] = {
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
        },
-       {       /* e500v2 */
+       {       /* e500v2 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x80210000,
                .cpu_name               = "e500v2",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_BIG_PHYS,
+               .cpu_features           = CPU_FTRS_E500_2,
                .cpu_user_features      = PPC_FEATURE_32 |
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
                        PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
@@ -1032,10 +988,11 @@ struct cpu_spec  cpu_specs[] = {
                .pvr_mask               = 0x00000000,
                .pvr_value              = 0x00000000,
                .cpu_name               = "(generic PPC)",
-               .cpu_features           = CPU_FTR_COMMON,
+               .cpu_features           = CPU_FTRS_GENERIC_32,
                .cpu_user_features      = PPC_FEATURE_32,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
        }
 #endif /* !CLASSIC_PPC */
+#endif /* CONFIG_PPC32 */
 };
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644 (file)
index 0000000..37b4396
--- /dev/null
@@ -0,0 +1,1002 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ *  This file contains the system call entry code, context switch
+ *  code, and exception/interrupt return code for PowerPC.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sys.h>
+#include <linux/threads.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+#undef SHOW_SYSCALLS
+#undef SHOW_SYSCALLS_TASK
+
+/*
+ * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
+ */
+#if MSR_KERNEL >= 0x10000
+#define LOAD_MSR_KERNEL(r, x)  lis r,(x)@h; ori r,r,(x)@l
+#else
+#define LOAD_MSR_KERNEL(r, x)  li r,(x)
+#endif
+
+#ifdef CONFIG_BOOKE
+#include "head_booke.h"
+#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)       \
+       mtspr   exc_level##_SPRG,r8;                    \
+       BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);          \
+       lwz     r0,GPR10-INT_FRAME_SIZE(r8);            \
+       stw     r0,GPR10(r11);                          \
+       lwz     r0,GPR11-INT_FRAME_SIZE(r8);            \
+       stw     r0,GPR11(r11);                          \
+       mfspr   r8,exc_level##_SPRG
+
+       .globl  mcheck_transfer_to_handler
+mcheck_transfer_to_handler:
+       TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
+       b       transfer_to_handler_full
+
+       .globl  debug_transfer_to_handler
+debug_transfer_to_handler:
+       TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
+       b       transfer_to_handler_full
+
+       .globl  crit_transfer_to_handler
+crit_transfer_to_handler:
+       TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
+       /* fall through */
+#endif
+
+#ifdef CONFIG_40x
+       .globl  crit_transfer_to_handler
+crit_transfer_to_handler:
+       lwz     r0,crit_r10@l(0)
+       stw     r0,GPR10(r11)
+       lwz     r0,crit_r11@l(0)
+       stw     r0,GPR11(r11)
+       /* fall through */
+#endif
+
+/*
+ * This code finishes saving the registers to the exception frame
+ * and jumps to the appropriate handler for the exception, turning
+ * on address translation.
+ * Note that we rely on the caller having set cr0.eq iff the exception
+ * occurred in kernel mode (i.e. MSR:PR = 0).
+ */
+       .globl  transfer_to_handler_full
+transfer_to_handler_full:
+       SAVE_NVGPRS(r11)
+       /* fall through */
+
+       .globl  transfer_to_handler
+transfer_to_handler:
+       stw     r2,GPR2(r11)
+       stw     r12,_NIP(r11)
+       stw     r9,_MSR(r11)
+       andi.   r2,r9,MSR_PR
+       mfctr   r12
+       mfspr   r2,SPRN_XER
+       stw     r12,_CTR(r11)
+       stw     r2,_XER(r11)
+       mfspr   r12,SPRN_SPRG3
+       addi    r2,r12,-THREAD
+       tovirt(r2,r2)                   /* set r2 to current */
+       beq     2f                      /* if from user, fix up THREAD.regs */
+       addi    r11,r1,STACK_FRAME_OVERHEAD
+       stw     r11,PT_REGS(r12)
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+       /* Check to see if the dbcr0 register is set up to debug.  Use the
+          single-step bit to do this. */
+       lwz     r12,THREAD_DBCR0(r12)
+       andis.  r12,r12,DBCR0_IC@h
+       beq+    3f
+       /* From user and task is ptraced - load up global dbcr0 */
+       li      r12,-1                  /* clear all pending debug events */
+       mtspr   SPRN_DBSR,r12
+       lis     r11,global_dbcr0@ha
+       tophys(r11,r11)
+       addi    r11,r11,global_dbcr0@l
+       lwz     r12,0(r11)
+       mtspr   SPRN_DBCR0,r12
+       lwz     r12,4(r11)
+       addi    r12,r12,-1
+       stw     r12,4(r11)
+#endif
+       b       3f
+2:     /* if from kernel, check interrupted DOZE/NAP mode and
+         * check for stack overflow
+         */
+#ifdef CONFIG_6xx
+       mfspr   r11,SPRN_HID0
+       mtcr    r11
+BEGIN_FTR_SECTION
+       bt-     8,power_save_6xx_restore        /* Check DOZE */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+BEGIN_FTR_SECTION
+       bt-     9,power_save_6xx_restore        /* Check NAP */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#endif /* CONFIG_6xx */
+       .globl transfer_to_handler_cont
+transfer_to_handler_cont:
+       lwz     r11,THREAD_INFO-THREAD(r12)
+       cmplw   r1,r11                  /* if r1 <= current->thread_info */
+       ble-    stack_ovf               /* then the kernel stack overflowed */
+3:
+       mflr    r9
+       lwz     r11,0(r9)               /* virtual address of handler */
+       lwz     r9,4(r9)                /* where to go when done */
+       FIX_SRR1(r10,r12)
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r10
+       mtlr    r9
+       SYNC
+       RFI                             /* jump to handler, enable MMU */
+
+/*
+ * On kernel stack overflow, load up an initial stack pointer
+ * and call StackOverflow(regs), which should not return.
+ */
+stack_ovf:
+       /* sometimes we use a statically-allocated stack, which is OK. */
+       lis     r11,_end@h
+       ori     r11,r11,_end@l
+       cmplw   r1,r11
+       ble     3b                      /* r1 <= &_end is OK */
+       SAVE_NVGPRS(r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lis     r1,init_thread_union@ha
+       addi    r1,r1,init_thread_union@l
+       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       lis     r9,StackOverflow@ha
+       addi    r9,r9,StackOverflow@l
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+       FIX_SRR1(r10,r12)
+       mtspr   SPRN_SRR0,r9
+       mtspr   SPRN_SRR1,r10
+       SYNC
+       RFI
+
+/*
+ * Handle a system call.
+ */
+       .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
+       .stabs  "entry_32.S",N_SO,0,0,0f
+0:
+
+_GLOBAL(DoSyscall)
+       stw     r0,THREAD+LAST_SYSCALL(r2)
+       stw     r3,ORIG_GPR3(r1)
+       li      r12,0
+       stw     r12,RESULT(r1)
+       lwz     r11,_CCR(r1)    /* Clear SO bit in CR */
+       rlwinm  r11,r11,0,4,2
+       stw     r11,_CCR(r1)
+#ifdef SHOW_SYSCALLS
+       bl      do_show_syscall
+#endif /* SHOW_SYSCALLS */
+       rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
+       li      r11,0
+       stb     r11,TI_SC_NOERR(r10)
+       lwz     r11,TI_FLAGS(r10)
+       andi.   r11,r11,_TIF_SYSCALL_T_OR_A
+       bne-    syscall_dotrace
+syscall_dotrace_cont:
+       cmplwi  0,r0,NR_syscalls
+       lis     r10,sys_call_table@h
+       ori     r10,r10,sys_call_table@l
+       slwi    r0,r0,2
+       bge-    66f
+       lwzx    r10,r10,r0      /* Fetch system call handler [ptr] */
+       mtlr    r10
+       addi    r9,r1,STACK_FRAME_OVERHEAD
+       PPC440EP_ERR42
+       blrl                    /* Call handler */
+       .globl  ret_from_syscall
+ret_from_syscall:
+#ifdef SHOW_SYSCALLS
+       bl      do_show_syscall_exit
+#endif
+       mr      r6,r3
+       li      r11,-_LAST_ERRNO
+       cmplw   0,r3,r11
+       rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
+       blt+    30f
+       lbz     r11,TI_SC_NOERR(r12)
+       cmpwi   r11,0
+       bne     30f
+       neg     r3,r3
+       lwz     r10,_CCR(r1)    /* Set SO bit in CR */
+       oris    r10,r10,0x1000
+       stw     r10,_CCR(r1)
+
+       /* disable interrupts so current_thread_info()->flags can't change */
+30:    LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+       SYNC
+       MTMSRD(r10)
+       lwz     r9,TI_FLAGS(r12)
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+       bne-    syscall_exit_work
+syscall_exit_cont:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+       /* If the process has its own DBCR0 value, load it up.  The single
+          step bit tells us that dbcr0 should be loaded. */
+       lwz     r0,THREAD+THREAD_DBCR0(r2)
+       andis.  r10,r0,DBCR0_IC@h
+       bnel-   load_dbcr0
+#endif
+       stwcx.  r0,0,r1                 /* to clear the reservation */
+       lwz     r4,_LINK(r1)
+       lwz     r5,_CCR(r1)
+       mtlr    r4
+       mtcr    r5
+       lwz     r7,_NIP(r1)
+       lwz     r8,_MSR(r1)
+       FIX_SRR1(r8, r0)
+       lwz     r2,GPR2(r1)
+       lwz     r1,GPR1(r1)
+       mtspr   SPRN_SRR0,r7
+       mtspr   SPRN_SRR1,r8
+       SYNC
+       RFI
+
+66:    li      r3,-ENOSYS
+       b       ret_from_syscall
+
+       .globl  ret_from_fork
+ret_from_fork:
+       REST_NVGPRS(r1)
+       bl      schedule_tail
+       li      r3,0
+       b       ret_from_syscall
+
+/* Traced system call support */
+syscall_dotrace:
+       SAVE_NVGPRS(r1)
+       li      r0,0xc00
+       stw     r0,TRAP(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      do_syscall_trace_enter
+       lwz     r0,GPR0(r1)     /* Restore original registers */
+       lwz     r3,GPR3(r1)
+       lwz     r4,GPR4(r1)
+       lwz     r5,GPR5(r1)
+       lwz     r6,GPR6(r1)
+       lwz     r7,GPR7(r1)
+       lwz     r8,GPR8(r1)
+       REST_NVGPRS(r1)
+       b       syscall_dotrace_cont
+
+syscall_exit_work:
+       stw     r6,RESULT(r1)   /* Save result */
+       stw     r3,GPR3(r1)     /* Update return value */
+       andi.   r0,r9,_TIF_SYSCALL_T_OR_A
+       beq     5f
+       ori     r10,r10,MSR_EE
+       SYNC
+       MTMSRD(r10)             /* re-enable interrupts */
+       lwz     r4,TRAP(r1)
+       andi.   r4,r4,1
+       beq     4f
+       SAVE_NVGPRS(r1)
+       li      r4,0xc00
+       stw     r4,TRAP(r1)
+4:
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      do_syscall_trace_leave
+       REST_NVGPRS(r1)
+2:
+       lwz     r3,GPR3(r1)
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+       SYNC
+       MTMSRD(r10)             /* disable interrupts again */
+       rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
+       lwz     r9,TI_FLAGS(r12)
+5:
+       andi.   r0,r9,_TIF_NEED_RESCHED
+       bne     1f
+       lwz     r5,_MSR(r1)
+       andi.   r5,r5,MSR_PR
+       beq     syscall_exit_cont
+       andi.   r0,r9,_TIF_SIGPENDING
+       beq     syscall_exit_cont
+       b       do_user_signal
+1:
+       ori     r10,r10,MSR_EE
+       SYNC
+       MTMSRD(r10)             /* re-enable interrupts */
+       bl      schedule
+       b       2b
+
+#ifdef SHOW_SYSCALLS
+do_show_syscall:
+#ifdef SHOW_SYSCALLS_TASK
+       lis     r11,show_syscalls_task@ha
+       lwz     r11,show_syscalls_task@l(r11)
+       cmp     0,r2,r11
+       bnelr
+#endif
+       stw     r31,GPR31(r1)
+       mflr    r31
+       lis     r3,7f@ha
+       addi    r3,r3,7f@l
+       lwz     r4,GPR0(r1)
+       lwz     r5,GPR3(r1)
+       lwz     r6,GPR4(r1)
+       lwz     r7,GPR5(r1)
+       lwz     r8,GPR6(r1)
+       lwz     r9,GPR7(r1)
+       bl      printk
+       lis     r3,77f@ha
+       addi    r3,r3,77f@l
+       lwz     r4,GPR8(r1)
+       mr      r5,r2
+       bl      printk
+       lwz     r0,GPR0(r1)
+       lwz     r3,GPR3(r1)
+       lwz     r4,GPR4(r1)
+       lwz     r5,GPR5(r1)
+       lwz     r6,GPR6(r1)
+       lwz     r7,GPR7(r1)
+       lwz     r8,GPR8(r1)
+       mtlr    r31
+       lwz     r31,GPR31(r1)
+       blr
+
+do_show_syscall_exit:
+#ifdef SHOW_SYSCALLS_TASK
+       lis     r11,show_syscalls_task@ha
+       lwz     r11,show_syscalls_task@l(r11)
+       cmp     0,r2,r11
+       bnelr
+#endif
+       stw     r31,GPR31(r1)
+       mflr    r31
+       stw     r3,RESULT(r1)   /* Save result */
+       mr      r4,r3
+       lis     r3,79f@ha
+       addi    r3,r3,79f@l
+       bl      printk
+       lwz     r3,RESULT(r1)
+       mtlr    r31
+       lwz     r31,GPR31(r1)
+       blr
+
+7:     .string "syscall %d(%x, %x, %x, %x, %x, "
+77:    .string "%x), current=%p\n"
+79:    .string " -> %x\n"
+       .align  2,0
+
+#ifdef SHOW_SYSCALLS_TASK
+       .data
+       .globl  show_syscalls_task
+show_syscalls_task:
+       .long   -1
+       .text
+#endif
+#endif /* SHOW_SYSCALLS */
+
+/*
+ * The sigsuspend and rt_sigsuspend system calls can call do_signal
+ * and thus put the process into the stopped state where we might
+ * want to examine its user state with ptrace.  Therefore we need
+ * to save all the nonvolatile registers (r13 - r31) before calling
+ * the C code.
+ */
+       .globl  ppc_sigsuspend
+ppc_sigsuspend:
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
+       stw     r0,TRAP(r1)             /* register set saved */
+       b       sys_sigsuspend
+
+       .globl  ppc_rt_sigsuspend
+ppc_rt_sigsuspend:
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       rlwinm  r0,r0,0,0,30
+       stw     r0,TRAP(r1)
+       b       sys_rt_sigsuspend
+
+       .globl  ppc_fork
+ppc_fork:
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
+       stw     r0,TRAP(r1)             /* register set saved */
+       b       sys_fork
+
+       .globl  ppc_vfork
+ppc_vfork:
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
+       stw     r0,TRAP(r1)             /* register set saved */
+       b       sys_vfork
+
+       .globl  ppc_clone
+ppc_clone:
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
+       stw     r0,TRAP(r1)             /* register set saved */
+       b       sys_clone
+
+       .globl  ppc_swapcontext
+ppc_swapcontext:
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
+       stw     r0,TRAP(r1)             /* register set saved */
+       b       sys_swapcontext
+
+/*
+ * Top-level page fault handling.
+ * This is in assembler because if do_page_fault tells us that
+ * it is a bad kernel page fault, we want to save the non-volatile
+ * registers before calling bad_page_fault.
+ */
+       .globl  handle_page_fault
+handle_page_fault:
+       stw     r4,_DAR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      do_page_fault
+       cmpwi   r3,0
+       beq+    ret_from_except
+       SAVE_NVGPRS(r1)
+       lwz     r0,TRAP(r1)
+       clrrwi  r0,r0,1
+       stw     r0,TRAP(r1)
+       mr      r5,r3
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lwz     r4,_DAR(r1)
+       bl      bad_page_fault
+       b       ret_from_except_full
+
+/*
+ * This routine switches between two different tasks.  The process
+ * state of one is saved on its kernel stack.  Then the state
+ * of the other is restored from its kernel stack.  The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process.
+ * On entry, r3 points to the THREAD for the current task, r4
+ * points to the THREAD for the new task.
+ *
+ * This routine is always called with interrupts disabled.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path.  If you change this , you'll have to
+ * change the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/ppc/kernel/process.c
+ */
+_GLOBAL(_switch)
+       stwu    r1,-INT_FRAME_SIZE(r1)
+       mflr    r0
+       stw     r0,INT_FRAME_SIZE+4(r1)
+       /* r3-r12 are caller saved -- Cort */
+       SAVE_NVGPRS(r1)
+       stw     r0,_NIP(r1)     /* Return to switch caller */
+       mfmsr   r11
+       li      r0,MSR_FP       /* Disable floating-point */
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+       oris    r0,r0,MSR_VEC@h /* Disable altivec */
+       mfspr   r12,SPRN_VRSAVE /* save vrsave register value */
+       stw     r12,THREAD+THREAD_VRSAVE(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       oris    r0,r0,MSR_SPE@h  /* Disable SPE */
+       mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
+       stw     r12,THREAD+THREAD_SPEFSCR(r2)
+#endif /* CONFIG_SPE */
+       and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
+       beq+    1f
+       andc    r11,r11,r0
+       MTMSRD(r11)
+       isync
+1:     stw     r11,_MSR(r1)
+       mfcr    r10
+       stw     r10,_CCR(r1)
+       stw     r1,KSP(r3)      /* Set old stack pointer */
+
+#ifdef CONFIG_SMP
+       /* We need a sync somewhere here to make sure that if the
+        * previous task gets rescheduled on another CPU, it sees all
+        * stores it has performed on this one.
+        */
+       sync
+#endif /* CONFIG_SMP */
+
+       tophys(r0,r4)
+       CLR_TOP32(r0)
+       mtspr   SPRN_SPRG3,r0   /* Update current THREAD phys addr */
+       lwz     r1,KSP(r4)      /* Load new stack pointer */
+
+       /* save the old current 'last' for return value */
+       mr      r3,r2
+       addi    r2,r4,-THREAD   /* Update current */
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+       lwz     r0,THREAD+THREAD_VRSAVE(r2)
+       mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       lwz     r0,THREAD+THREAD_SPEFSCR(r2)
+       mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
+#endif /* CONFIG_SPE */
+
+       lwz     r0,_CCR(r1)
+       mtcrf   0xFF,r0
+       /* r3-r12 are destroyed -- Cort */
+       REST_NVGPRS(r1)
+
+       lwz     r4,_NIP(r1)     /* Return to _switch caller in new task */
+       mtlr    r4
+       addi    r1,r1,INT_FRAME_SIZE
+       blr
+
+       .globl  fast_exception_return
+fast_exception_return:
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+       andi.   r10,r9,MSR_RI           /* check for recoverable interrupt */
+       beq     1f                      /* if not, we've got problems */
+#endif
+
+2:     REST_4GPRS(3, r11)
+       lwz     r10,_CCR(r11)
+       REST_GPR(1, r11)
+       mtcr    r10
+       lwz     r10,_LINK(r11)
+       mtlr    r10
+       REST_GPR(10, r11)
+       mtspr   SPRN_SRR1,r9
+       mtspr   SPRN_SRR0,r12
+       REST_GPR(9, r11)
+       REST_GPR(12, r11)
+       lwz     r11,GPR11(r11)
+       SYNC
+       RFI
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+/* check if the exception happened in a restartable section */
+1:     lis     r3,exc_exit_restart_end@ha
+       addi    r3,r3,exc_exit_restart_end@l
+       cmplw   r12,r3
+       bge     3f
+       lis     r4,exc_exit_restart@ha
+       addi    r4,r4,exc_exit_restart@l
+       cmplw   r12,r4
+       blt     3f
+       lis     r3,fee_restarts@ha
+       tophys(r3,r3)
+       lwz     r5,fee_restarts@l(r3)
+       addi    r5,r5,1
+       stw     r5,fee_restarts@l(r3)
+       mr      r12,r4          /* restart at exc_exit_restart */
+       b       2b
+
+       .comm   fee_restarts,4
+
+/* aargh, a nonrecoverable interrupt, panic */
+/* aargh, we don't know which trap this is */
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+       b       2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+       li      r10,-1
+       stw     r10,TRAP(r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lis     r10,MSR_KERNEL@h
+       ori     r10,r10,MSR_KERNEL@l
+       bl      transfer_to_handler_full
+       .long   nonrecoverable_exception
+       .long   ret_from_except
+#endif
+
+       .globl  sigreturn_exit
+sigreturn_exit:
+       subi    r1,r3,STACK_FRAME_OVERHEAD
+       rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
+       lwz     r9,TI_FLAGS(r12)
+       andi.   r0,r9,_TIF_SYSCALL_T_OR_A
+       beq+    ret_from_except_full
+       bl      do_syscall_trace_leave
+       /* fall through */
+
+       .globl  ret_from_except_full
+ret_from_except_full:
+       REST_NVGPRS(r1)
+       /* fall through */
+
+       .globl  ret_from_except
+ret_from_except:
+       /* Hard-disable interrupts so that current_thread_info()->flags
+        * can't change between when we test it and when we return
+        * from the interrupt. */
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+       SYNC                    /* Some chip revs have problems here... */
+       MTMSRD(r10)             /* disable interrupts */
+
+       lwz     r3,_MSR(r1)     /* Returning to user mode? */
+       andi.   r0,r3,MSR_PR
+       beq     resume_kernel
+
+user_exc_return:               /* r10 contains MSR_KERNEL here */
+       /* Check current_thread_info()->flags */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r9,TI_FLAGS(r9)
+       andi.   r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+       bne     do_work
+
+restore_user:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+       /* Check whether this process has its own DBCR0 value.  The single
+          step bit tells us that dbcr0 should be loaded. */
+       lwz     r0,THREAD+THREAD_DBCR0(r2)
+       andis.  r10,r0,DBCR0_IC@h
+       bnel-   load_dbcr0
+#endif
+
+#ifdef CONFIG_PREEMPT
+       b       restore
+
+/* N.B. the only way to get here is from the beq following ret_from_except. */
+resume_kernel:
+       /* check current_thread_info->preempt_count */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r0,TI_PREEMPT(r9)
+       cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
+       bne     restore
+       lwz     r0,TI_FLAGS(r9)
+       andi.   r0,r0,_TIF_NEED_RESCHED
+       beq+    restore
+       andi.   r0,r3,MSR_EE    /* interrupts off? */
+       beq     restore         /* don't schedule if so */
+1:     bl      preempt_schedule_irq
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r3,TI_FLAGS(r9)
+       andi.   r0,r3,_TIF_NEED_RESCHED
+       bne-    1b
+#else
+resume_kernel:
+#endif /* CONFIG_PREEMPT */
+
+       /* interrupts are hard-disabled at this point */
+restore:
+       lwz     r0,GPR0(r1)
+       lwz     r2,GPR2(r1)
+       REST_4GPRS(3, r1)
+       REST_2GPRS(7, r1)
+
+       lwz     r10,_XER(r1)
+       lwz     r11,_CTR(r1)
+       mtspr   SPRN_XER,r10
+       mtctr   r11
+
+       PPC405_ERR77(0,r1)
+       stwcx.  r0,0,r1                 /* to clear the reservation */
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+       lwz     r9,_MSR(r1)
+       andi.   r10,r9,MSR_RI           /* check if this exception occurred */
+       beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */
+
+       lwz     r10,_CCR(r1)
+       lwz     r11,_LINK(r1)
+       mtcrf   0xFF,r10
+       mtlr    r11
+
+       /*
+        * Once we put values in SRR0 and SRR1, we are in a state
+        * where exceptions are not recoverable, since taking an
+        * exception will trash SRR0 and SRR1.  Therefore we clear the
+        * MSR:RI bit to indicate this.  If we do take an exception,
+        * we can't return to the point of the exception but we
+        * can restart the exception exit path at the label
+        * exc_exit_restart below.  -- paulus
+        */
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
+       SYNC
+       MTMSRD(r10)             /* clear the RI bit */
+       .globl exc_exit_restart
+exc_exit_restart:
+       lwz     r9,_MSR(r1)
+       lwz     r12,_NIP(r1)
+       FIX_SRR1(r9,r10)
+       mtspr   SPRN_SRR0,r12
+       mtspr   SPRN_SRR1,r9
+       REST_4GPRS(9, r1)
+       lwz     r1,GPR1(r1)
+       .globl exc_exit_restart_end
+exc_exit_restart_end:
+       SYNC
+       RFI
+
+#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
+       /*
+        * This is a bit different on 4xx/Book-E because it doesn't have
+        * the RI bit in the MSR.
+        * The TLB miss handler checks if we have interrupted
+        * the exception exit path and restarts it if so
+        * (well maybe one day it will... :).
+        */
+       lwz     r11,_LINK(r1)
+       mtlr    r11
+       lwz     r10,_CCR(r1)
+       mtcrf   0xff,r10
+       REST_2GPRS(9, r1)
+       .globl exc_exit_restart
+exc_exit_restart:
+       lwz     r11,_NIP(r1)
+       lwz     r12,_MSR(r1)
+exc_exit_start:
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
+       REST_2GPRS(11, r1)
+       lwz     r1,GPR1(r1)
+       .globl exc_exit_restart_end
+exc_exit_restart_end:
+       PPC405_ERR77_SYNC
+       rfi
+       b       .                       /* prevent prefetch past rfi */
+
+/*
+ * Returning from a critical interrupt in user mode doesn't need
+ * to be any different from a normal exception.  For a critical
+ * interrupt in the kernel, we just return (without checking for
+ * preemption) since the interrupt may have happened at some crucial
+ * place (e.g. inside the TLB miss handler), and because we will be
+ * running with r1 pointing into critical_stack, not the current
+ * process's kernel stack (and therefore current_thread_info() will
+ * give the wrong answer).
+ * We have to restore various SPRs that may have been in use at the
+ * time of the critical interrupt.
+ *
+ */
+#ifdef CONFIG_40x
+#define PPC_40x_TURN_OFF_MSR_DR                                                    \
+       /* avoid any possible TLB misses here by turning off MSR.DR, we     \
+        * assume the instructions here are mapped by a pinned TLB entry */ \
+       li      r10,MSR_IR;                                                 \
+       mtmsr   r10;                                                        \
+       isync;                                                              \
+       tophys(r1, r1);
+#else
+#define PPC_40x_TURN_OFF_MSR_DR
+#endif
+
+#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)    \
+       REST_NVGPRS(r1);                                                \
+       lwz     r3,_MSR(r1);                                            \
+       andi.   r3,r3,MSR_PR;   /* returning to user mode? */           \
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL);                                \
+       bne     user_exc_return;                                        \
+       lwz     r0,GPR0(r1);                                            \
+       lwz     r2,GPR2(r1);                                            \
+       REST_4GPRS(3, r1);                                              \
+       REST_2GPRS(7, r1);                                              \
+       lwz     r10,_XER(r1);                                           \
+       lwz     r11,_CTR(r1);                                           \
+       mtspr   SPRN_XER,r10;                                           \
+       mtctr   r11;                                                    \
+       PPC405_ERR77(0,r1);                                             \
+       stwcx.  r0,0,r1;                /* to clear the reservation */  \
+       lwz     r11,_LINK(r1);                                          \
+       mtlr    r11;                                                    \
+       lwz     r10,_CCR(r1);                                           \
+       mtcrf   0xff,r10;                                               \
+       PPC_40x_TURN_OFF_MSR_DR;                                        \
+       lwz     r9,_DEAR(r1);   /* restore SPRs that were in use */     \
+       lwz     r10,_ESR(r1);   /* at the time of the interrupt */      \
+       mtspr   SPRN_DEAR,r9;                                           \
+       mtspr   SPRN_ESR,r10;                                           \
+       lwz     r11,_NIP(r1);                                           \
+       lwz     r12,_MSR(r1);                                           \
+       mtspr   exc_lvl_srr0,r11;       /* return PC */                 \
+       mtspr   exc_lvl_srr1,r12;       /* return MSR */                \
+       lwz     r9,GPR9(r1);                                            \
+       lwz     r12,GPR12(r1);                                          \
+       lwz     r10,GPR10(r1);                                          \
+       lwz     r11,GPR11(r1);                                          \
+       lwz     r1,GPR1(r1);    /* last: frame unreachable after this */\
+       PPC405_ERR77_SYNC;                                              \
+       exc_lvl_rfi;                                                    \
+       b       .;              /* prevent prefetch past exc_lvl_rfi */
+
+       .globl  ret_from_crit_exc
+ret_from_crit_exc:
+       RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) /* critical level */
+
+#ifdef CONFIG_BOOKE
+       .globl  ret_from_debug_exc
+ret_from_debug_exc:
+       RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI) /* debug level */
+
+       .globl  ret_from_mcheck_exc
+ret_from_mcheck_exc:
+       RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI) /* mcheck level */
+#endif /* CONFIG_BOOKE */
+
+/*
+ * Load the DBCR0 value for a task that is being ptraced,
+ * having first saved away the global DBCR0.  Note that r0
+ * has the dbcr0 value to set upon entry to this.
+ */
+load_dbcr0:
+       mfmsr   r10             /* first disable debug exceptions */
+       rlwinm  r10,r10,0,~MSR_DE
+       mtmsr   r10
+       isync
+       mfspr   r10,SPRN_DBCR0
+       lis     r11,global_dbcr0@ha
+       addi    r11,r11,global_dbcr0@l
+       stw     r10,0(r11)      /* global_dbcr0[0] = previous DBCR0 */
+       mtspr   SPRN_DBCR0,r0
+       lwz     r10,4(r11)      /* bump counter at global_dbcr0[4] */
+       addi    r10,r10,1
+       stw     r10,4(r11)
+       li      r11,-1
+       mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
+       blr
+
+       .comm   global_dbcr0,8  /* [0]=saved DBCR0, [4]=use counter */
+#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+do_work:                       /* r10 contains MSR_KERNEL here */
+       andi.   r0,r9,_TIF_NEED_RESCHED
+       beq     do_user_signal
+
+do_resched:                    /* r10 contains MSR_KERNEL here */
+       ori     r10,r10,MSR_EE
+       SYNC
+       MTMSRD(r10)             /* hard-enable interrupts */
+       bl      schedule
+recheck:
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+       SYNC
+       MTMSRD(r10)             /* disable interrupts */
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)     /* current_thread_info() */
+       lwz     r9,TI_FLAGS(r9)
+       andi.   r0,r9,_TIF_NEED_RESCHED
+       bne-    do_resched
+       andi.   r0,r9,_TIF_SIGPENDING
+       beq     restore_user
+do_user_signal:                        /* r10 contains MSR_KERNEL here */
+       ori     r10,r10,MSR_EE
+       SYNC
+       MTMSRD(r10)             /* hard-enable interrupts */
+       /* save r13-r31 in the exception frame, if not already done */
+       lwz     r3,TRAP(r1)
+       andi.   r0,r3,1         /* TRAP bit 0 set => NVGPRS not saved */
+       beq     2f
+       SAVE_NVGPRS(r1)
+       rlwinm  r3,r3,0,0,30    /* clear the "unsaved" bit in TRAP */
+       stw     r3,TRAP(r1)
+2:     li      r3,0            /* NULL first argument for do_signal */
+       addi    r4,r1,STACK_FRAME_OVERHEAD
+       bl      do_signal
+       REST_NVGPRS(r1)
+       b       recheck
+
+/*
+ * We come here when we are at the end of handling an exception
+ * that occurred at a place where taking an exception will lose
+ * state information, such as the contents of SRR0 and SRR1.
+ */
+nonrecoverable:
+       lis     r10,exc_exit_restart_end@ha
+       addi    r10,r10,exc_exit_restart_end@l
+       cmplw   r12,r10         /* is r12 inside exc_exit_restart..end? */
+       bge     3f
+       lis     r11,exc_exit_restart@ha
+       addi    r11,r11,exc_exit_restart@l
+       cmplw   r12,r11
+       blt     3f
+       lis     r10,ee_restarts@ha
+       lwz     r12,ee_restarts@l(r10)
+       addi    r12,r12,1       /* count this restart */
+       stw     r12,ee_restarts@l(r10)
+       mr      r12,r11         /* restart at exc_exit_restart */
+       blr
+3:     /* OK, we can't recover, kill this process */
+       /* but the 601 doesn't implement the RI bit, so assume it's OK */
+BEGIN_FTR_SECTION
+       blr
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+       lwz     r3,TRAP(r1)
+       andi.   r0,r3,1         /* NVGPRS already saved? */
+       beq     4f
+       SAVE_NVGPRS(r1)
+       rlwinm  r3,r3,0,0,30    /* clear the "unsaved" bit in TRAP */
+       stw     r3,TRAP(r1)
+4:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      nonrecoverable_exception
+       /* shouldn't return */
+       b       4b
+
+       .comm   ee_restarts,4   /* count of restarted exception exits */
+
+/*
+ * PROM code for specific machines follows.  Put it
+ * here so it's easy to add arch-specific sections later.
+ * -- Cort
+ */
+#ifdef CONFIG_PPC_OF
+/*
+ * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
+ * called with the MMU off.
+ */
+_GLOBAL(enter_rtas)
+       stwu    r1,-INT_FRAME_SIZE(r1)
+       mflr    r0
+       stw     r0,INT_FRAME_SIZE+4(r1) /* save LR in caller's frame */
+       lis     r4,rtas_data@ha
+       lwz     r4,rtas_data@l(r4)
+       lis     r6,1f@ha        /* physical return address for rtas */
+       addi    r6,r6,1f@l
+       tophys(r6,r6)
+       tophys(r7,r1)           /* physical stack pointer */
+       lis     r8,rtas_entry@ha
+       lwz     r8,rtas_entry@l(r8)
+       mfmsr   r9
+       stw     r9,8(r1)        /* save original MSR */
+       LOAD_MSR_KERNEL(r0,MSR_KERNEL)
+       SYNC                    /* disable interrupts so SRR0/1 */
+       MTMSRD(r0)              /* don't get trashed */
+       li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)        /* RTAS runs MMU off */
+       mtlr    r6
+       CLR_TOP32(r7)
+       mtspr   SPRN_SPRG2,r7   /* stash physical stack ptr in SPRG2 */
+       mtspr   SPRN_SRR0,r8
+       mtspr   SPRN_SRR1,r9
+       RFI                     /* "return" into rtas_entry with MMU off */
+1:     tophys(r9,r1)
+       lwz     r8,INT_FRAME_SIZE+4(r9) /* get return address */
+       lwz     r9,8(r9)        /* original msr value */
+       FIX_SRR1(r9,r0)
+       addi    r1,r1,INT_FRAME_SIZE
+       li      r0,0
+       mtspr   SPRN_SPRG2,r0   /* SPRG2 cleared again */
+       mtspr   SPRN_SRR0,r8
+       mtspr   SPRN_SRR1,r9
+       RFI                     /* return to caller */
+
+       .globl  machine_check_in_rtas
+machine_check_in_rtas:
+       twi     31,0,0          /* unconditional trap */
+       /* XXX load up BATs and panic */
+
+#endif /* CONFIG_PPC_OF */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
new file mode 100644 (file)
index 0000000..984a106
--- /dev/null
@@ -0,0 +1,842 @@
+/*
+ *  arch/ppc64/kernel/entry.S
+ *
+ *  PowerPC version 
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ *  This file contains the system call entry code, context switch
+ *  code, and exception/interrupt return code for PowerPC.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <asm/unistd.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#define DO_SOFT_DISABLE        /* NOTE(review): gates iSeries soft-disable paths — confirm */
+#endif
+
+/*
+ * System calls.
+ */
+       .section        ".toc","aw"
+.SYS_CALL_TABLE:
+       .tc .sys_call_table[TC],.sys_call_table
+
+/* This value is used to mark exception frames on the stack. */
+exception_marker:
+       .tc     ID_72656773_68657265[TC],0x7265677368657265     /* ASCII "regshere" */
+
+       .section        ".text"
+       .align 7
+
+#undef SHOW_SYSCALLS
+
+       .globl system_call_common
+system_call_common:
+       andi.   r10,r12,MSR_PR          /* r12 holds caller's MSR: from user? */
+       mr      r10,r1
+       addi    r1,r1,-INT_FRAME_SIZE
+       beq-    1f                      /* from kernel: stay on this stack */
+       ld      r1,PACAKSAVE(r13)       /* from user: switch to kernel stack */
+1:     std     r10,0(r1)               /* back-chain to previous stack */
+       std     r11,_NIP(r1)
+       std     r12,_MSR(r1)
+       std     r0,GPR0(r1)
+       std     r10,GPR1(r1)
+       std     r2,GPR2(r1)
+       std     r3,GPR3(r1)
+       std     r4,GPR4(r1)
+       std     r5,GPR5(r1)
+       std     r6,GPR6(r1)
+       std     r7,GPR7(r1)
+       std     r8,GPR8(r1)
+       li      r11,0
+       std     r11,GPR9(r1)            /* r9-r12 were clobbered on entry: store zeros */
+       std     r11,GPR10(r1)
+       std     r11,GPR11(r1)
+       std     r11,GPR12(r1)
+       std     r9,GPR13(r1)
+       crclr   so                      /* assume no syscall error (SO clear) */
+       mfcr    r9
+       mflr    r10
+       li      r11,0xc01               /* trap value for a system call */
+       std     r9,_CCR(r1)
+       std     r10,_LINK(r1)
+       std     r11,_TRAP(r1)
+       mfxer   r9
+       mfctr   r10
+       std     r9,_XER(r1)
+       std     r10,_CTR(r1)
+       std     r3,ORIG_GPR3(r1)
+       ld      r2,PACATOC(r13)         /* kernel TOC */
+       addi    r9,r1,STACK_FRAME_OVERHEAD
+       ld      r11,exception_marker@toc(r2)
+       std     r11,-16(r9)             /* "regshere" marker */
+#ifdef CONFIG_PPC_ISERIES
+       /* Hack for handling interrupts when soft-enabling on iSeries */
+       cmpdi   cr1,r0,0x5555           /* syscall 0x5555 */
+       andi.   r10,r12,MSR_PR          /* from kernel */
+       crand   4*cr0+eq,4*cr1+eq,4*cr0+eq
+       beq     hardware_interrupt_entry
+       lbz     r10,PACAPROCENABLED(r13)
+       std     r10,SOFTE(r1)           /* record soft-enable state */
+#endif
+       mfmsr   r11
+       ori     r11,r11,MSR_EE          /* interrupts back on */
+       mtmsrd  r11,1
+
+#ifdef SHOW_SYSCALLS
+       bl      .do_show_syscall
+       REST_GPR(0,r1)
+       REST_4GPRS(3,r1)
+       REST_2GPRS(7,r1)
+       addi    r9,r1,STACK_FRAME_OVERHEAD
+#endif
+       clrrdi  r11,r1,THREAD_SHIFT     /* current_thread_info() */
+       li      r12,0
+       ld      r10,TI_FLAGS(r11)
+       stb     r12,TI_SC_NOERR(r11)    /* clear the "no error" flag */
+       andi.   r11,r10,_TIF_SYSCALL_T_OR_A
+       bne-    syscall_dotrace
+syscall_dotrace_cont:
+       cmpldi  0,r0,NR_syscalls
+       bge-    syscall_enosys          /* out-of-range syscall number */
+
+system_call:                   /* label this so stack traces look sane */
+/*
+ * Need to vector to 32 Bit or default sys_call_table here,
+ * based on caller's run-mode / personality.
+ */
+       ld      r11,.SYS_CALL_TABLE@toc(2)
+       andi.   r10,r10,_TIF_32BIT      /* r10 still holds TI_FLAGS */
+       beq     15f
+       addi    r11,r11,8       /* use 32-bit syscall entries */
+       clrldi  r3,r3,32        /* zero-extend the 32-bit arguments */
+       clrldi  r4,r4,32
+       clrldi  r5,r5,32
+       clrldi  r6,r6,32
+       clrldi  r7,r7,32
+       clrldi  r8,r8,32
+15:
+       slwi    r0,r0,4         /* table entries are 16 bytes apart */
+       ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
+       mtctr   r10
+       bctrl                   /* Call handler */
+
+syscall_exit:
+#ifdef SHOW_SYSCALLS
+       std     r3,GPR3(r1)
+       bl      .do_show_syscall_exit
+       ld      r3,GPR3(r1)
+#endif
+       std     r3,RESULT(r1)
+       ld      r5,_CCR(r1)
+       li      r10,-_LAST_ERRNO
+       cmpld   r3,r10          /* unsigned: catches -_LAST_ERRNO..-1 */
+       clrrdi  r12,r1,THREAD_SHIFT     /* current_thread_info() */
+       bge-    syscall_error
+syscall_error_cont:
+
+       /* check for syscall tracing or audit */
+       ld      r9,TI_FLAGS(r12)
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+       bne-    syscall_exit_trace
+syscall_exit_trace_cont:
+
+       /* disable interrupts so current_thread_info()->flags can't change,
+          and so that we don't get interrupted after loading SRR0/1. */
+       ld      r8,_MSR(r1)
+       andi.   r10,r8,MSR_RI
+       beq-    unrecov_restore
+       mfmsr   r10
+       rldicl  r10,r10,48,1    /* clear MSR_EE: rotate, mask, rotate back */
+       rotldi  r10,r10,16
+       mtmsrd  r10,1
+       ld      r9,TI_FLAGS(r12)
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+       bne-    syscall_exit_work
+       ld      r7,_NIP(r1)
+       stdcx.  r0,0,r1                 /* to clear the reservation */
+       andi.   r6,r8,MSR_PR
+       ld      r4,_LINK(r1)
+       beq-    1f                      /* only restore r13 if */
+       ld      r13,GPR13(r1)           /* returning to usermode */
+1:     ld      r2,GPR2(r1)
+       li      r12,MSR_RI
+       andc    r10,r10,r12
+       mtmsrd  r10,1                   /* clear MSR.RI */
+       ld      r1,GPR1(r1)
+       mtlr    r4
+       mtcr    r5
+       mtspr   SPRN_SRR0,r7
+       mtspr   SPRN_SRR1,r8
+       rfid
+       b       .       /* prevent speculative execution */
+
+syscall_enosys:
+       li      r3,-ENOSYS
+       std     r3,RESULT(r1)
+       clrrdi  r12,r1,THREAD_SHIFT     /* current_thread_info() */
+       ld      r5,_CCR(r1)
+
+syscall_error:                 /* falls through from syscall_enosys */
+       lbz     r11,TI_SC_NOERR(r12)
+       cmpwi   0,r11,0
+       bne-    syscall_error_cont      /* flagged: don't report as error */
+       neg     r3,r3                   /* return positive errno */
+       oris    r5,r5,0x1000    /* Set SO bit in CR */
+       std     r5,_CCR(r1)
+       b       syscall_error_cont
+        
+/* Traced system call support */
+syscall_dotrace:
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_syscall_trace_enter
+       ld      r0,GPR0(r1)     /* Restore original registers */
+       ld      r3,GPR3(r1)
+       ld      r4,GPR4(r1)
+       ld      r5,GPR5(r1)
+       ld      r6,GPR6(r1)
+       ld      r7,GPR7(r1)
+       ld      r8,GPR8(r1)
+       addi    r9,r1,STACK_FRAME_OVERHEAD
+       clrrdi  r10,r1,THREAD_SHIFT
+       ld      r10,TI_FLAGS(r10)       /* reload flags after tracing */
+       b       syscall_dotrace_cont
+
+syscall_exit_trace:
+       std     r3,GPR3(r1)
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_syscall_trace_leave
+       REST_NVGPRS(r1)
+       ld      r3,GPR3(r1)
+       ld      r5,_CCR(r1)
+       clrrdi  r12,r1,THREAD_SHIFT
+       b       syscall_exit_trace_cont
+
+/* Stuff to do on exit from a system call. */
+syscall_exit_work:
+       std     r3,GPR3(r1)
+       std     r5,_CCR(r1)
+       b       .ret_from_except_lite
+
+/* Save non-volatile GPRs, if not already saved. */
+_GLOBAL(save_nvgprs)
+       ld      r11,_TRAP(r1)
+       andi.   r0,r11,1        /* TRAP bit 0 set => not saved yet */
+       beqlr-                  /* already saved: nothing to do */
+       SAVE_NVGPRS(r1)
+       clrrdi  r0,r11,1        /* clear the "unsaved" bit */
+       std     r0,_TRAP(r1)
+       blr
+
+/*
+ * The sigsuspend and rt_sigsuspend system calls can call do_signal
+ * and thus put the process into the stopped state where we might
+ * want to examine its user state with ptrace.  Therefore we need
+ * to save all the nonvolatile registers (r14 - r31) before calling
+ * the C code.  Similarly, fork, vfork and clone need the full
+ * register state on the stack so that it can be copied to the child.
+ */
+_GLOBAL(ppc32_sigsuspend)
+       bl      .save_nvgprs
+       bl      .compat_sys_sigsuspend
+       b       70f
+
+_GLOBAL(ppc64_rt_sigsuspend)
+       bl      .save_nvgprs
+       bl      .sys_rt_sigsuspend
+       b       70f
+
+_GLOBAL(ppc32_rt_sigsuspend)
+       bl      .save_nvgprs
+       bl      .compat_sys_rt_sigsuspend
+70:    cmpdi   0,r3,0          /* shared tail for all sigsuspend flavours */
+       /* If it returned an error, we need to return via syscall_exit to set
+          the SO bit in cr0 and potentially stop for ptrace. */
+       bne     syscall_exit
+       /* If sigsuspend() returns zero, we are going into a signal handler. We
+          may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
+#ifdef CONFIG_AUDIT
+       ld      r3,PACACURRENT(r13)
+       ld      r4,AUDITCONTEXT(r3)
+       cmpdi   0,r4,0
+       beq     .ret_from_except        /* No audit_context: Leave immediately. */
+       li      r4, 2                   /* AUDITSC_FAILURE */
+       li      r5,-4                   /* It's always -EINTR */
+       bl      .audit_syscall_exit
+#endif
+       b       .ret_from_except
+
+_GLOBAL(ppc_fork)
+       bl      .save_nvgprs
+       bl      .sys_fork
+       b       syscall_exit
+
+_GLOBAL(ppc_vfork)
+       bl      .save_nvgprs
+       bl      .sys_vfork
+       b       syscall_exit
+
+_GLOBAL(ppc_clone)
+       bl      .save_nvgprs
+       bl      .sys_clone
+       b       syscall_exit
+
+_GLOBAL(ppc32_swapcontext)
+       bl      .save_nvgprs
+       bl      .compat_sys_swapcontext
+       b       80f
+       
+_GLOBAL(ppc64_swapcontext)
+       bl      .save_nvgprs
+       bl      .sys_swapcontext
+       b       80f
+
+_GLOBAL(ppc32_sigreturn)
+       bl      .compat_sys_sigreturn
+       b       80f
+
+_GLOBAL(ppc32_rt_sigreturn)
+       bl      .compat_sys_rt_sigreturn
+       b       80f
+
+_GLOBAL(ppc64_rt_sigreturn)
+       bl      .sys_rt_sigreturn
+
+80:    cmpdi   0,r3,0          /* negative result => error path */
+       blt     syscall_exit
+       clrrdi  r4,r1,THREAD_SHIFT
+       ld      r4,TI_FLAGS(r4)
+       andi.   r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+       beq+    81f
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_syscall_trace_leave
+81:    b       .ret_from_except
+
+_GLOBAL(ret_from_fork)
+       bl      .schedule_tail
+       REST_NVGPRS(r1)
+       li      r3,0            /* child's syscall result is 0 */
+       b       syscall_exit
+
+/*
+ * This routine switches between two different tasks.  The process
+ * state of one is saved on its kernel stack.  Then the state
+ * of the other is restored from its kernel stack.  The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via ret_from_except.
+ * On entry, r3 points to the THREAD for the current task, r4
+ * points to the THREAD for the new task.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path.  If you change this you'll have to change
+ * the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/ppc64/kernel/process.c
+ */
+       .align  7
+_GLOBAL(_switch)
+       mflr    r0
+       std     r0,16(r1)
+       stdu    r1,-SWITCH_FRAME_SIZE(r1)
+       /* r3-r13 are caller saved -- Cort */
+       SAVE_8GPRS(14, r1)
+       SAVE_10GPRS(22, r1)
+       mflr    r20             /* Return to switch caller */
+       mfmsr   r22
+       li      r0, MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+       oris    r0,r0,MSR_VEC@h /* Disable altivec */
+       mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
+       std     r24,THREAD_VRSAVE(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+       and.    r0,r0,r22       /* was FP (or VEC) enabled? */
+       beq+    1f
+       andc    r22,r22,r0      /* turn those MSR bits off */
+       mtmsrd  r22
+       isync
+1:     std     r20,_NIP(r1)
+       mfcr    r23
+       std     r23,_CCR(r1)
+       std     r1,KSP(r3)      /* Set old stack pointer */
+
+#ifdef CONFIG_SMP
+       /* We need a sync somewhere here to make sure that if the
+        * previous task gets rescheduled on another CPU, it sees all
+        * stores it has performed on this one.
+        */
+       sync
+#endif /* CONFIG_SMP */
+
+       addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
+       std     r6,PACACURRENT(r13)     /* Set new 'current' */
+
+       ld      r8,KSP(r4)      /* new stack pointer */
+BEGIN_FTR_SECTION
+       clrrdi  r6,r8,28        /* get its ESID */
+       clrrdi  r9,r1,28        /* get current sp ESID */
+       clrldi. r0,r6,2         /* is new ESID c00000000? */
+       cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
+       cror    eq,4*cr1+eq,eq  /* eq: kernel region OR unchanged ESID */
+       beq     2f              /* if yes, don't slbie it */
+
+       /* Bolt in the new stack SLB entry */
+       ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
+       oris    r0,r6,(SLB_ESID_V)@h    /* ESID plus valid bit */
+       ori     r0,r0,(SLB_NUM_BOLTED-1)@l      /* bolted slot index */
+       slbie   r6
+       slbie   r6              /* Workaround POWER5 < DD2.1 issue */
+       slbmte  r7,r0
+       isync
+
+2:
+END_FTR_SECTION_IFSET(CPU_FTR_SLB)
+       clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
+       /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+          because we don't need to leave the 288-byte ABI gap at the
+          top of the kernel stack. */
+       addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
+
+       mr      r1,r8           /* start using new stack pointer */
+       std     r7,PACAKSAVE(r13)
+
+       ld      r6,_CCR(r1)
+       mtcrf   0xFF,r6
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+       ld      r0,THREAD_VRSAVE(r4)
+       mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+
+       /* r3-r13 are destroyed -- Cort */
+       REST_8GPRS(14, r1)
+       REST_10GPRS(22, r1)
+
+       /* convert old thread to its task_struct for return value */
+       addi    r3,r3,-THREAD
+       ld      r7,_NIP(r1)     /* Return to _switch caller in new task */
+       mtlr    r7
+       addi    r1,r1,SWITCH_FRAME_SIZE
+       blr
+
+       .align  7
+_GLOBAL(ret_from_except)
+       ld      r11,_TRAP(r1)
+       andi.   r0,r11,1        /* TRAP bit 0 set => NVGPRS not saved */
+       bne     .ret_from_except_lite
+       REST_NVGPRS(r1)
+
+_GLOBAL(ret_from_except_lite)
+       /*
+        * Disable interrupts so that current_thread_info()->flags
+        * can't change between when we test it and when we return
+        * from the interrupt.
+        */
+       mfmsr   r10             /* Get current interrupt state */
+       rldicl  r9,r10,48,1     /* clear MSR_EE */
+       rotldi  r9,r9,16
+       mtmsrd  r9,1            /* Update machine state */
+
+#ifdef CONFIG_PREEMPT
+       clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
+       li      r0,_TIF_NEED_RESCHED    /* bits to check */
+       ld      r3,_MSR(r1)
+       ld      r4,TI_FLAGS(r9)
+       /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
+       rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
+       and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
+       bne     do_work
+
+#else /* !CONFIG_PREEMPT */
+       ld      r3,_MSR(r1)     /* Returning to user mode? */
+       andi.   r3,r3,MSR_PR
+       beq     restore         /* if not, just restore regs and return */
+
+       /* Check current_thread_info()->flags */
+       clrrdi  r9,r1,THREAD_SHIFT
+       ld      r4,TI_FLAGS(r9)
+       andi.   r0,r4,_TIF_USER_WORK_MASK
+       bne     do_work
+#endif
+
+restore:
+#ifdef CONFIG_PPC_ISERIES
+       ld      r5,SOFTE(r1)
+       cmpdi   0,r5,0
+       beq     4f
+       /* Check for pending interrupts (iSeries) */
+       ld      r3,PACALPPACA+LPPACAANYINT(r13)
+       cmpdi   r3,0
+       beq+    4f                      /* skip do_IRQ if no interrupts */
+
+       li      r3,0
+       stb     r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
+       ori     r10,r10,MSR_EE
+       mtmsrd  r10                     /* hard-enable again */
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_IRQ
+       b       .ret_from_except_lite           /* loop back and handle more */
+
+4:     stb     r5,PACAPROCENABLED(r13)
+#endif
+
+       ld      r3,_MSR(r1)
+       andi.   r0,r3,MSR_RI
+       beq-    unrecov_restore         /* can't return without RI */
+
+       andi.   r0,r3,MSR_PR
+
+       /*
+        * r13 is our per cpu area, only restore it if we are returning to
+        * userspace
+        */
+       beq     1f
+       REST_GPR(13, r1)
+1:
+       ld      r3,_CTR(r1)
+       ld      r0,_LINK(r1)
+       mtctr   r3
+       mtlr    r0
+       ld      r3,_XER(r1)
+       mtspr   SPRN_XER,r3
+
+       REST_8GPRS(5, r1)
+
+       stdcx.  r0,0,r1         /* to clear the reservation */
+
+       mfmsr   r0
+       li      r2, MSR_RI
+       andc    r0,r0,r2
+       mtmsrd  r0,1            /* clear MSR.RI before loading SRR0/1 */
+
+       ld      r0,_MSR(r1)
+       mtspr   SPRN_SRR1,r0
+
+       ld      r2,_CCR(r1)
+       mtcrf   0xFF,r2
+       ld      r2,_NIP(r1)
+       mtspr   SPRN_SRR0,r2
+
+       ld      r0,GPR0(r1)
+       ld      r2,GPR2(r1)
+       ld      r3,GPR3(r1)
+       ld      r4,GPR4(r1)
+       ld      r1,GPR1(r1)     /* last reference to this frame */
+
+       rfid
+       b       .       /* prevent speculative execution */
+
+/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
+do_work:
+#ifdef CONFIG_PREEMPT
+       andi.   r0,r3,MSR_PR    /* Returning to user mode? */
+       bne     user_work
+       /* Check that preempt_count() == 0 and interrupts are enabled */
+       lwz     r8,TI_PREEMPT(r9)
+       cmpwi   cr1,r8,0
+#ifdef CONFIG_PPC_ISERIES
+       ld      r0,SOFTE(r1)
+       cmpdi   r0,0
+#else
+       andi.   r0,r3,MSR_EE
+#endif
+       crandc  eq,cr1*4+eq,eq  /* eq: count==0 && irqs were enabled */
+       bne     restore
+       /* here we are preempting the current task */
+1:
+#ifdef CONFIG_PPC_ISERIES
+       li      r0,1
+       stb     r0,PACAPROCENABLED(r13) /* mark soft-enabled */
+#endif
+       ori     r10,r10,MSR_EE
+       mtmsrd  r10,1           /* reenable interrupts */
+       bl      .preempt_schedule
+       mfmsr   r10
+       clrrdi  r9,r1,THREAD_SHIFT
+       rldicl  r10,r10,48,1    /* disable interrupts again */
+       rotldi  r10,r10,16
+       mtmsrd  r10,1
+       ld      r4,TI_FLAGS(r9)
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       bne     1b              /* reschedule raced in: go again */
+       b       restore
+
+user_work:
+#endif
+       /* Enable interrupts */
+       ori     r10,r10,MSR_EE
+       mtmsrd  r10,1
+
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq     1f
+       bl      .schedule
+       b       .ret_from_except_lite
+
+1:     bl      .save_nvgprs
+       li      r3,0            /* NULL first argument for do_signal */
+       addi    r4,r1,STACK_FRAME_OVERHEAD
+       bl      .do_signal
+       b       .ret_from_except
+
+unrecov_restore:
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .unrecoverable_exception
+       b       unrecov_restore         /* should not return; loop if it does */
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
+ * called with the MMU off.
+ *
+ * In addition, we need to be in 32b mode, at least for now.
+ * 
+ * Note: r3 is an input parameter to rtas, so don't trash it...
+ */
+_GLOBAL(enter_rtas)
+       mflr    r0
+       std     r0,16(r1)
+        stdu   r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
+
+       /* Because RTAS is running in 32b mode, it clobbers the high order half
+        * of all registers that it saves.  We therefore save those registers
+        * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
+        */
+       SAVE_GPR(2, r1)                 /* Save the TOC */
+       SAVE_GPR(13, r1)                /* Save paca */
+       SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
+       SAVE_10GPRS(22, r1)             /* ditto */
+
+       mfcr    r4
+       std     r4,_CCR(r1)
+       mfctr   r5
+       std     r5,_CTR(r1)
+       mfspr   r6,SPRN_XER
+       std     r6,_XER(r1)
+       mfdar   r7
+       std     r7,_DAR(r1)
+       mfdsisr r8
+       std     r8,_DSISR(r1)
+       mfsrr0  r9
+       std     r9,_SRR0(r1)
+       mfsrr1  r10
+       std     r10,_SRR1(r1)
+
+       /* There is no way it is acceptable to get here with interrupts enabled,
+        * check it with the asm equivalent of WARN_ON
+        */
+       mfmsr   r6
+       andi.   r0,r6,MSR_EE
+1:     tdnei   r0,0            /* trap if interrupts were enabled */
+.section __bug_table,"a"
+       .llong  1b,__LINE__ + 0x1000000, 1f, 2f
+.previous
+.section .rodata,"a"
+1:     .asciz  __FILE__
+2:     .asciz "enter_rtas"
+.previous
+       
+       /* Unfortunately, the stack pointer and the MSR are also clobbered,
+        * so they are saved in the PACA which allows us to restore
+        * our original state after RTAS returns.
+         */
+       std     r1,PACAR1(r13)
+        std    r6,PACASAVEDMSR(r13)
+
+       /* Setup our real return addr */        
+       SET_REG_TO_LABEL(r4,.rtas_return_loc)
+       SET_REG_TO_CONST(r9,KERNELBASE)
+       sub     r4,r4,r9        /* return address as a physical address */
+               mtlr    r4
+
+       li      r0,0
+       ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
+       andc    r0,r6,r0        /* r0 = MSR with EE/SE/BE/RI cleared */
+       
+        li      r9,1
+        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
+       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
+       andc    r6,r0,r9        /* also drop SF (64-bit), MMU and FP bits */
+       ori     r6,r6,MSR_RI    /* but keep RI on while inside RTAS */
+       sync                            /* disable interrupts so SRR0/1 */
+       mtmsrd  r0                      /* don't get trashed */
+
+       SET_REG_TO_LABEL(r4,rtas)
+       ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
+       ld      r4,RTASBASE(r4)         /* get the rtas->base value */
+       
+       mtspr   SPRN_SRR0,r5
+       mtspr   SPRN_SRR1,r6
+       rfid                    /* "return" into RTAS with MSR = r6 */
+       b       .       /* prevent speculative execution */
+
+_STATIC(rtas_return_loc)
+       /* relocation is off at this point */
+       mfspr   r4,SPRN_SPRG3           /* Get PACA */
+       SET_REG_TO_CONST(r5, KERNELBASE)
+        sub     r4,r4,r5                /* RELOC the PACA base pointer */
+
+       mfmsr   r6
+       li      r0,MSR_RI
+       andc    r6,r6,r0        /* clear RI around the SRR0/1 reload */
+       sync    
+       mtmsrd  r6
+        
+        ld     r1,PACAR1(r4)           /* Restore our SP */
+       LOADADDR(r3,.rtas_restore_regs)
+        ld     r4,PACASAVEDMSR(r4)     /* Restore our MSR */
+
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       rfid
+       b       .       /* prevent speculative execution */
+
+_STATIC(rtas_restore_regs)
+       /* relocation is on at this point */
+       REST_GPR(2, r1)                 /* Restore the TOC */
+       REST_GPR(13, r1)                /* Restore paca */
+       REST_8GPRS(14, r1)              /* Restore the non-volatiles */
+       REST_10GPRS(22, r1)             /* ditto */
+
+       mfspr   r13,SPRN_SPRG3          /* reload the PACA pointer */
+
+       ld      r4,_CCR(r1)
+       mtcr    r4
+       ld      r5,_CTR(r1)
+       mtctr   r5
+       ld      r6,_XER(r1)
+       mtspr   SPRN_XER,r6
+       ld      r7,_DAR(r1)
+       mtdar   r7
+       ld      r8,_DSISR(r1)
+       mtdsisr r8
+       ld      r9,_SRR0(r1)
+       mtsrr0  r9
+       ld      r10,_SRR1(r1)
+       mtsrr1  r10
+
+        addi   r1,r1,RTAS_FRAME_SIZE   /* Unstack our frame */
+       ld      r0,16(r1)               /* get return address */
+
+       mtlr    r0
+        blr                            /* return to caller */
+
+#endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+
+_GLOBAL(enter_prom)
+       mflr    r0
+       std     r0,16(r1)
+        stdu   r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
+
+       /* Because PROM is running in 32b mode, it clobbers the high order half
+        * of all registers that it saves.  We therefore save those registers
+        * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
+        */
+       SAVE_8GPRS(2, r1)
+       SAVE_GPR(13, r1)
+       SAVE_8GPRS(14, r1)
+       SAVE_10GPRS(22, r1)
+       mfcr    r4
+       std     r4,_CCR(r1)
+       mfctr   r5
+       std     r5,_CTR(r1)
+       mfspr   r6,SPRN_XER
+       std     r6,_XER(r1)
+       mfdar   r7
+       std     r7,_DAR(r1)
+       mfdsisr r8
+       std     r8,_DSISR(r1)
+       mfsrr0  r9
+       std     r9,_SRR0(r1)
+       mfsrr1  r10
+       std     r10,_SRR1(r1)
+       mfmsr   r11
+       std     r11,_MSR(r1)
+
+       /* Get the PROM entrypoint */
+       ld      r0,GPR4(r1)     /* saved second argument (r4) */
+       mtlr    r0
+
+       /* Switch MSR to 32 bits mode
+        */
+        mfmsr   r11
+        li      r12,1
+        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
+        andc    r11,r11,r12    /* clear the SF bit */
+        li      r12,1
+        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
+        andc    r11,r11,r12    /* clear the ISF bit */
+        mtmsrd  r11
+        isync
+
+       /* Restore arguments & enter PROM here... */
+       ld      r3,GPR3(r1)
+       blrl                    /* call into PROM */
+
+       /* Just make sure that r1 top 32 bits didn't get
+        * corrupt by OF
+        */
+       rldicl  r1,r1,0,32
+
+       /* Restore the MSR (back to 64 bits) */
+       ld      r0,_MSR(r1)
+       mtmsrd  r0
+        isync
+
+       /* Restore other registers */
+       REST_GPR(2, r1)
+       REST_GPR(13, r1)
+       REST_8GPRS(14, r1)
+       REST_10GPRS(22, r1)
+       ld      r4,_CCR(r1)
+       mtcr    r4
+       ld      r5,_CTR(r1)
+       mtctr   r5
+       ld      r6,_XER(r1)
+       mtspr   SPRN_XER,r6
+       ld      r7,_DAR(r1)
+       mtdar   r7
+       ld      r8,_DSISR(r1)
+       mtdsisr r8
+       ld      r9,_SRR0(r1)
+       mtsrr0  r9
+       ld      r10,_SRR1(r1)
+       mtsrr1  r10
+       
+        addi   r1,r1,PROM_FRAME_SIZE
+       ld      r0,16(r1)
+       mtlr    r0
+        blr
+       
+#endif /* CONFIG_PPC_MULTIPLATFORM */
similarity index 69%
rename from arch/ppc/kernel/fpu.S
rename to arch/powerpc/kernel/fpu.S
index 665d7d3..563d445 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include <linux/config.h>
-#include <asm/processor.h>
+#include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
  * Load up this task's FP registers from its thread_struct,
  * enable the FPU for the current task and return to the task.
  */
-       .globl  load_up_fpu
-load_up_fpu:
+_GLOBAL(load_up_fpu)
        mfmsr   r5
        ori     r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
-       clrldi  r5,r5,1                 /* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
        SYNC
        MTMSRD(r5)                      /* enable use of fpu now */
        isync
@@ -43,67 +39,57 @@ load_up_fpu:
 * to another.  Instead we call giveup_fpu in switch_to.
 */
 #ifndef CONFIG_SMP
-       tophys(r6,0)                    /* get __pa constant */
-       addis   r3,r6,last_task_used_math@ha
-       lwz     r4,last_task_used_math@l(r3)
-       cmpwi   0,r4,0
+       LOADBASE(r3, last_task_used_math)       /* 32/64-bit address of lazy-FP owner */
+       tophys(r3,r3)
+       LDL     r4,OFF(last_task_used_math)(r3) /* LDL: lwz (32-bit) or ld (64-bit) */
+       CMPI    0,r4,0
        beq     1f
-       add     r4,r4,r6
+       tophys(r4,r4)
        addi    r4,r4,THREAD            /* want last_task_used_math->thread */
        SAVE_32FPRS(0, r4)
        mffs    fr0
        stfd    fr0,THREAD_FPSCR-4(r4)
-       lwz     r5,PT_REGS(r4)
-       add     r5,r5,r6
-       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       LDL     r5,PT_REGS(r4)
+       tophys(r5,r5)
+       LDL     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        li      r10,MSR_FP|MSR_FE0|MSR_FE1
        andc    r4,r4,r10               /* disable FP for previous task */
-       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       STL     r4,_MSR-STACK_FRAME_OVERHEAD(r5)        /* STL: stw or std */
 1:
 #endif /* CONFIG_SMP */
        /* enable use of FP after return */
+#ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG3           /* current task's THREAD (phys) */
        lwz     r4,THREAD_FPEXC_MODE(r5)
        ori     r9,r9,MSR_FP            /* enable FP for current */
        or      r9,r9,r4
+#else
+       ld      r4,PACACURRENT(r13)     /* current task via the PACA (r13) */
+       addi    r5,r4,THREAD            /* Get THREAD */
+       ld      r4,THREAD_FPEXC_MODE(r5)
+       ori     r12,r12,MSR_FP          /* r12 carries the return MSR on 64-bit */
+       or      r12,r12,r4
+       std     r12,_MSR(r1)
+#endif
        lfd     fr0,THREAD_FPSCR-4(r5)
        mtfsf   0xff,fr0
        REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
-       sub     r4,r4,r6
-       stw     r4,last_task_used_math@l(r3)
+       tovirt(r4,r4)                   /* back to a virtual task pointer */
+       STL     r4,OFF(last_task_used_math)(r3)
 #endif /* CONFIG_SMP */
        /* restore registers and return */
        /* we haven't used ctr or xer or lr */
        b       fast_exception_return
 
 /*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-       .globl  KernelFP
-KernelFP:
-       lwz     r3,_MSR(r1)
-       ori     r3,r3,MSR_FP
-       stw     r3,_MSR(r1)             /* enable use of FP after return */
-       lis     r3,86f@h
-       ori     r3,r3,86f@l
-       mr      r4,r2                   /* current */
-       lwz     r5,_NIP(r1)
-       bl      printk
-       b       ret_from_except
-86:    .string "floating point used in kernel (task=%p, pc=%x)\n"
-       .align  4,0
-
-/*
  * giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-       .globl  giveup_fpu
-giveup_fpu:
+_GLOBAL(giveup_fpu)
        mfmsr   r5
        ori     r5,r5,MSR_FP
        SYNC_601
@@ -111,23 +97,23 @@ giveup_fpu:
        MTMSRD(r5)                      /* enable use of fpu now */
        SYNC_601
        isync
-       cmpwi   0,r3,0
+       CMPI    0,r3,0                  /* CMPI: cmpwi (32-bit) or cmpdi (64-bit) */
        beqlr-                          /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
-       lwz     r5,PT_REGS(r3)
-       cmpwi   0,r5,0
+       LDL     r5,PT_REGS(r3)
+       CMPI    0,r5,0
        SAVE_32FPRS(0, r3)
        mffs    fr0
        stfd    fr0,THREAD_FPSCR-4(r3)
        beq     1f
-       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       LDL     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        li      r3,MSR_FP|MSR_FE0|MSR_FE1
        andc    r4,r4,r3                /* disable FP for previous task */
-       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       STL     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #ifndef CONFIG_SMP
        li      r5,0
-       lis     r4,last_task_used_math@ha
-       stw     r5,last_task_used_math@l(r4)
+       LOADBASE(r4,last_task_used_math)
+       STL     r5,OFF(last_task_used_math)(r4) /* no lazy-FP owner any more */
 #endif /* CONFIG_SMP */
        blr
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
new file mode 100644 (file)
index 0000000..cd51fe5
--- /dev/null
@@ -0,0 +1,1371 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  This file contains the low-level support and setup for the
+ *  PowerPC platform, including trap and interrupt dispatch.
+ *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_APUS
+#include <asm/amigappc.h>
+#endif
+
+/* 601 only has IBATs; cr0.eq is set on 601 when using this macro */
+#define LOAD_BAT(n, reg, RA, RB)       \
+       /* see the comment for clear_bats() -- Cort */ \
+       li      RA,0;                   \
+       mtspr   SPRN_IBAT##n##U,RA;     /* invalidate BAT n before loading it */ \
+       mtspr   SPRN_DBAT##n##U,RA;     \
+       lwz     RA,(n*16)+0(reg);       \
+       lwz     RB,(n*16)+4(reg);       \
+       mtspr   SPRN_IBAT##n##U,RA;     \
+       mtspr   SPRN_IBAT##n##L,RB;     \
+       beq     1f;                     /* 601 (cr0.eq): no DBATs to load */ \
+       lwz     RA,(n*16)+8(reg);       \
+       lwz     RB,(n*16)+12(reg);      \
+       mtspr   SPRN_DBAT##n##U,RA;     \
+       mtspr   SPRN_DBAT##n##L,RB;     \
+1:
+
+       .text
+       .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
+       .stabs  "head_32.S",N_SO,0,0,0f
+0:
+       .globl  _stext
+_stext:
+
+/*
+ * _start is defined this way because the XCOFF loader in the OpenFirmware
+ * on the powermac expects the entry point to be a procedure descriptor.
+ */
+       .text
+       .globl  _start
+_start:
+       /*
+        * These are here for legacy reasons, the kernel used to
+        * need to look like a coff function entry for the pmac
+        * but we're always started by some kind of bootloader now.
+        *  -- Cort
+        */
+       nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
+       nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
+       nop     /* execution falls through to __start below */
+
+/* PMAC
+ * Enter here with the kernel text, data and bss loaded starting at
+ * 0, running with virtual == physical mapping.
+ * r5 points to the prom entry point (the client interface handler
+ * address).  Address translation is turned on, with the prom
+ * managing the hash table.  Interrupts are disabled.  The stack
+ * pointer (r1) points to just below the end of the half-meg region
+ * from 0x380000 - 0x400000, which is mapped in already.
+ *
+ * If we are booted from MacOS via BootX, we enter with the kernel
+ * image loaded somewhere, and the following values in registers:
+ *  r3: 'BooX' (0x426f6f58)
+ *  r4: virtual address of boot_infos_t
+ *  r5: 0
+ *
+ * APUS
+ *   r3: 'APUS'
+ *   r4: physical address of memory base
+ *   Linux/m68k style BootInfo structure at &_end.
+ *
+ * PREP
+ * This is jumped to on prep systems right after the kernel is relocated
+ * to its proper place in memory by the boot loader.  The expected layout
+ * of the regs is:
+ *   r3: ptr to residual data
+ *   r4: initrd_start or if no initrd then 0
+ *   r5: initrd_end - unused if r4 is 0
+ *   r6: Start of command line string
+ *   r7: End of command line string
+ *
+ * This just gets a minimal mmu environment setup so we can call
+ * start_here() to do the real work.
+ * -- Cort
+ */
+
+       .globl  __start
+__start:
+/*
+ * We have to do any OF calls before we map ourselves to KERNELBASE,
+ * because OF may have I/O devices mapped into that area
+ * (particularly on CHRP).
+ */
+       cmpwi   0,r5,0                  /* r5 nonzero => OF client interface (see header) */
+       beq     1f
+       bl      prom_init
+       trap                            /* NOTE(review): prom_init is not expected to return here */
+
+1:     mr      r31,r3                  /* save parameters */
+       mr      r30,r4
+       li      r24,0                   /* cpu # */
+
+/*
+ * early_init() does the early machine identification and does
+ * the necessary low-level setup and clears the BSS
+ *  -- Cort <cort@fsmlabs.com>
+ */
+       bl      early_init
+
+#ifdef CONFIG_APUS
+/* On APUS the __va/__pa constants need to be set to the correct
+ * values before continuing.
+ */
+       mr      r4,r30                  /* saved r4: physical memory base (see header) */
+       bl      fix_mem_constants
+#endif /* CONFIG_APUS */
+
+/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
+ * the physical address we are running at, returned by early_init()
+ */
+       bl      mmu_off
+__after_mmu_off:
+       bl      clear_bats
+       bl      flush_tlbs
+
+       bl      initial_bats            /* map the first 16M of RAM with BATs */
+
+/*
+ * Call setup_cpu for CPU 0 and initialize 6xx Idle
+ */
+       bl      reloc_offset
+       li      r24,0                   /* cpu# */
+       bl      call_setup_cpu          /* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+       bl      reloc_offset
+       bl      init_idle_6xx
+#endif /* CONFIG_6xx */
+
+
+#ifndef CONFIG_APUS
+/*
+ * We need to run with _start at physical address 0.
+ * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
+ * the exception vectors at 0 (and therefore this copy
+ * overwrites OF's exception vectors with our own).
+ * The MMU is off at this point.
+ */
+       bl      reloc_offset
+       mr      r26,r3
+       addis   r4,r3,KERNELBASE@h      /* current address of _start */
+       cmpwi   0,r4,0                  /* are we already running at 0? */
+       bne     relocate_kernel
+#endif /* CONFIG_APUS */
+/*
+ * we now have the 1st 16M of ram mapped with the bats.
+ * prep needs the mmu to be turned on here, but pmac already has it on.
+ * this shouldn't bother the pmac since it just gets turned on again
+ * as we jump to our code at KERNELBASE. -- Cort
+ * Actually no, pmac doesn't have it on any more. BootX enters with MMU
+ * off, and in other cases, we now turn it off before changing BATs above.
+ */
+turn_on_mmu:
+       mfmsr   r0
+       ori     r0,r0,MSR_DR|MSR_IR     /* turn on data & instruction relocation */
+       mtspr   SPRN_SRR1,r0            /* MSR value installed by the rfi */
+       lis     r0,start_here@h
+       ori     r0,r0,start_here@l
+       mtspr   SPRN_SRR0,r0            /* continue at start_here */
+       SYNC
+       RFI                             /* enables MMU */
+
+/*
+ * We need __secondary_hold as a place to hold the other cpus on
+ * an SMP machine, even when we are running a UP kernel.
+ */
+       . = 0xc0                        /* for prep bootloader */
+       li      r3,1                    /* MTX only has 1 cpu */
+       .globl  __secondary_hold
+__secondary_hold:
+       /* tell the master we're here */
+       stw     r3,4(0)
+#ifdef CONFIG_SMP
+100:   lwz     r4,0(0)
+       /* wait until we're told to start */
+       cmpw    0,r4,r3
+       bne     100b
+       /* our cpu # was at addr 0 - go */
+       mr      r24,r3                  /* cpu # */
+       b       __secondary_start
+#else
+       b       .                       /* UP kernel: park this cpu forever */
+#endif /* CONFIG_SMP */
+
+/*
+ * Exception entry code.  This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+#define EXCEPTION_PROLOG       \
+       mtspr   SPRN_SPRG0,r10; /* stash r10/r11 in scratch SPRs */ \
+       mtspr   SPRN_SPRG1,r11; \
+       mfcr    r10;            \
+       EXCEPTION_PROLOG_1;     \
+       EXCEPTION_PROLOG_2
+
+#define EXCEPTION_PROLOG_1     \
+       mfspr   r11,SPRN_SRR1;          /* check whether user or kernel */ \
+       andi.   r11,r11,MSR_PR; \
+       tophys(r11,r1);                 /* use tophys(r1) if kernel */ \
+       beq     1f;             \
+       mfspr   r11,SPRN_SPRG3; /* from user: use the task's kernel stack */ \
+       lwz     r11,THREAD_INFO-THREAD(r11);    \
+       addi    r11,r11,THREAD_SIZE;    /* start at the top of that stack */ \
+       tophys(r11,r11);        \
+1:     subi    r11,r11,INT_FRAME_SIZE  /* alloc exc. frame */
+
+
+#define EXCEPTION_PROLOG_2     \
+       CLR_TOP32(r11);         \
+       stw     r10,_CCR(r11);          /* save registers */ \
+       stw     r12,GPR12(r11); \
+       stw     r9,GPR9(r11);   \
+       mfspr   r10,SPRN_SPRG0; /* recover r10/r11 stashed in the prolog */ \
+       stw     r10,GPR10(r11); \
+       mfspr   r12,SPRN_SPRG1; \
+       stw     r12,GPR11(r11); \
+       mflr    r10;            \
+       stw     r10,_LINK(r11); \
+       mfspr   r12,SPRN_SRR0;  \
+       mfspr   r9,SPRN_SRR1;   \
+       stw     r1,GPR1(r11);   \
+       stw     r1,0(r11);      /* back-chain to the interrupted frame */ \
+       tovirt(r1,r11);                 /* set new kernel sp */ \
+       li      r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
+       MTMSRD(r10);                    /* (except for mach check in rtas) */ \
+       stw     r0,GPR0(r11);   \
+       SAVE_4GPRS(3, r11);     \
+       SAVE_2GPRS(7, r11)
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r11, r12 (SRR0), and r9 (SRR1).
+ *
+ * Note2: once we have set r1 we are in a position to take exceptions
+ * again, and we could thus set MSR:RI at that point.
+ */
+
+/*
+ * Exception vectors.
+ */
+#define EXCEPTION(n, label, hdlr, xfer)                \
+       . = n;                                  /* place at vector offset n */ \
+label:                                         \
+       EXCEPTION_PROLOG;                       \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;     /* r3 = pt_regs for the handler */ \
+       xfer(n, hdlr)
+
+#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)    \
+       li      r10,trap;                                       \
+       stw     r10,TRAP(r11);                                  \
+       li      r10,MSR_KERNEL;                                 \
+       copyee(r10, r9);                                        /* optionally keep MSR_EE from SRR1 (r9) */ \
+       bl      tfer;                                           \
+i##n:                                                          /* tfer finds these two words via lr */ \
+       .long   hdlr;                                           \
+       .long   ret
+
+#define COPY_EE(d, s)          rlwimi d,s,0,16,16      /* copy the MSR_EE bit */
+#define NOCOPY(d, s)
+
+#define EXC_XFER_STD(n, hdlr)          \
+       EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
+                         ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr)         \
+       EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
+                         ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)           \
+       EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
+                         ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)      \
+       EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
+                         ret_from_except)
+
+/* System reset */
+/* core99 pmac starts the secondary here by changing the vector, and
+   putting it back to what it was (unknown_exception) when done.  */
+#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
+       . = 0x100
+       b       __secondary_start_gemini
+#else
+       EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+#endif
+
+/* Machine check */
+/*
+ * On CHRP, this is complicated by the fact that we could get a
+ * machine check inside RTAS, and we have no guarantee that certain
+ * critical registers will have the values we expect.  The set of
+ * registers that might have bad values includes all the GPRs
+ * and all the BATs.  We indicate that we are in RTAS by putting
+ * a non-zero value, the address of the exception frame to use,
+ * in SPRG2.  The machine check handler checks SPRG2 and uses its
+ * value if it is non-zero.  If we ever needed to free up SPRG2,
+ * we could use a field in the thread_info or thread_struct instead.
+ * (Other exception handlers assume that r1 is a valid kernel stack
+ * pointer when we take an exception from supervisor mode.)
+ *     -- paulus.
+ */
+       . = 0x200
+       mtspr   SPRN_SPRG0,r10          /* open-coded EXCEPTION_PROLOG */
+       mtspr   SPRN_SPRG1,r11
+       mfcr    r10
+#ifdef CONFIG_PPC_CHRP
+       mfspr   r11,SPRN_SPRG2          /* non-zero => inside RTAS (see comment above) */
+       cmpwi   0,r11,0
+       bne     7f                      /* r11 already holds the RTAS exc. frame */
+#endif /* CONFIG_PPC_CHRP */
+       EXCEPTION_PROLOG_1
+7:     EXCEPTION_PROLOG_2
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_CHRP
+       mfspr   r4,SPRN_SPRG2
+       cmpwi   cr1,r4,0
+       bne     cr1,1f                  /* machine check inside RTAS */
+#endif
+       EXC_XFER_STD(0x200, machine_check_exception)
+#ifdef CONFIG_PPC_CHRP
+1:     b       machine_check_in_rtas
+#endif
+
+/* Data access exception. */
+       . = 0x300
+DataAccess:
+       EXCEPTION_PROLOG
+       mfspr   r10,SPRN_DSISR
+       andis.  r0,r10,0xa470           /* weird error? */
+       bne     1f                      /* if not, try to put a PTE */
+       mfspr   r4,SPRN_DAR             /* into the hash table */
+       rlwinm  r3,r10,32-15,21,21      /* DSISR_STORE -> _PAGE_RW */
+       bl      hash_page
+1:     stw     r10,_DSISR(r11)
+       mr      r5,r10                  /* r5 = DSISR for handle_page_fault */
+       mfspr   r4,SPRN_DAR             /* r4 = faulting address */
+       EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+
+/* Instruction access exception. */
+       . = 0x400
+InstructionAccess:
+       EXCEPTION_PROLOG
+       andis.  r0,r9,0x4000            /* no pte found? */
+       beq     1f                      /* if so, try to put a PTE */
+       li      r3,0                    /* into the hash table */
+       mr      r4,r12                  /* SRR0 is fault address */
+       bl      hash_page
+1:     mr      r4,r12                  /* r4 = faulting pc (SRR0) */
+       mr      r5,r9                   /* r5 = SRR1 */
+       EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+/* External interrupt */
+       EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* Alignment exception */
+       . = 0x600
+Alignment:
+       EXCEPTION_PROLOG
+       mfspr   r4,SPRN_DAR
+       stw     r4,_DAR(r11)
+       mfspr   r5,SPRN_DSISR
+       stw     r5,_DSISR(r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_EE(0x600, alignment_exception)
+
+/* Program check exception */
+       EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
+
+/* Floating-point unavailable */
+       . = 0x800
+FPUnavailable:
+       EXCEPTION_PROLOG
+       bne     load_up_fpu             /* if from user, just load it up */
+       addi    r3,r1,STACK_FRAME_OVERHEAD      /* from kernel: report it */
+       EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
+
+/* Decrementer */
+       EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+
+       EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
+
+/* System call */
+       . = 0xc00
+SystemCall:
+       EXCEPTION_PROLOG
+       EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+/* Single step - not used on 601 */
+       EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+       EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
+
+/*
+ * The Altivec unavailable trap is at 0x0f20.  Foo.
+ * We effectively remap it to 0x3000.
+ * We include an altivec unavailable exception vector even if
+ * not configured for Altivec, so that you can't panic a
+ * non-altivec kernel running on a machine with altivec just
+ * by executing an altivec instruction.
+ */
+       . = 0xf00
+       b       Trap_0f
+
+       . = 0xf20
+       b       AltiVecUnavailable
+
+Trap_0f:
+       EXCEPTION_PROLOG
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_EE(0xf00, unknown_exception)
+
+/*
+ * Handle TLB miss for instruction on 603/603e.
+ * Note: we get an alternate set of r0 - r3 to use automatically.
+ */
+       . = 0x1000
+InstructionTLBMiss:
+/*
+ * r0: stored ctr
+ * r1: linux style pte ( later becomes ppc hardware pte )
+ * r2: ptr to linux-style pte
+ * r3: scratch
+ */
+       mfctr   r0                      /* save ctr (see register map above) */
+       /* Get PTE (linux-style) and check access */
+       mfspr   r3,SPRN_IMISS
+       lis     r1,KERNELBASE@h         /* check if kernel address */
+       cmplw   0,r3,r1
+       mfspr   r2,SPRN_SPRG3
+       li      r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+       lwz     r2,PGDIR(r2)
+       blt+    112f
+       lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
+       addi    r2,r2,swapper_pg_dir@l  /* kernel page table */
+       mfspr   r1,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
+       rlwinm  r1,r1,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
+112:   tophys(r2,r2)
+       rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
+       lwz     r2,0(r2)                /* get pmd entry */
+       rlwinm. r2,r2,0,0,19            /* extract address of pte page */
+       beq-    InstructionAddressInvalid       /* return if no mapping */
+       rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
+       lwz     r3,0(r2)                /* get linux-style pte */
+       andc.   r1,r1,r3                /* check access & ~permission */
+       bne-    InstructionAddressInvalid /* return if access not permitted */
+       ori     r3,r3,_PAGE_ACCESSED    /* set _PAGE_ACCESSED in pte */
+       /*
+        * NOTE! We are assuming this is not an SMP system, otherwise
+        * we would need to update the pte atomically with lwarx/stwcx.
+        */
+       stw     r3,0(r2)                /* update PTE (accessed bit) */
+       /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwinm  r1,r3,32-10,31,31       /* _PAGE_RW -> PP lsb */
+       rlwinm  r2,r3,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
+       and     r1,r1,r2                /* writable if _RW and _DIRTY */
+       rlwimi  r3,r3,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r3,r3,32-1,31,31        /* _PAGE_USER -> PP lsb */
+       ori     r1,r1,0xe14             /* clear out reserved bits and M */
+       andc    r1,r3,r1                /* PP = user? (rw&dirty? 2: 3): 0 */
+       mtspr   SPRN_RPA,r1             /* hardware PTE word for the tlbli */
+       mfspr   r3,SPRN_IMISS
+       tlbli   r3                      /* load the ITLB entry */
+       mfspr   r3,SPRN_SRR1            /* Need to restore CR0 */
+       mtcrf   0x80,r3
+       rfi
+InstructionAddressInvalid:
+       mfspr   r3,SPRN_SRR1
+       rlwinm  r1,r3,9,6,6     /* Get load/store bit */
+
+       addis   r1,r1,0x2000
+       mtspr   SPRN_DSISR,r1   /* (shouldn't be needed) */
+       mtctr   r0              /* Restore CTR */
+       andi.   r2,r3,0xFFFF    /* Clear upper bits of SRR1 */
+       or      r2,r2,r1
+       mtspr   SPRN_SRR1,r2
+       mfspr   r1,SPRN_IMISS   /* Get failing address */
+       rlwinm. r2,r2,0,31,31   /* Check for little endian access */
+       rlwimi  r2,r2,1,30,30   /* change 1 -> 3 */
+       xor     r1,r1,r2
+       mtspr   SPRN_DAR,r1     /* Set fault address */
+       mfmsr   r0              /* Restore "normal" registers */
+       xoris   r0,r0,MSR_TGPR>>16      /* leave TGPR mode (normal GPRs again) */
+       mtcrf   0x80,r3         /* Restore CR0 */
+       mtmsr   r0
+       b       InstructionAccess
+
+/*
+ * Handle TLB miss for DATA Load operation on 603/603e
+ */
+       . = 0x1100
+DataLoadTLBMiss:
+/*
+ * r0: stored ctr
+ * r1: linux style pte ( later becomes ppc hardware pte )
+ * r2: ptr to linux-style pte
+ * r3: scratch
+ */
+       mfctr   r0                      /* save ctr (see register map above) */
+       /* Get PTE (linux-style) and check access */
+       mfspr   r3,SPRN_DMISS
+       lis     r1,KERNELBASE@h         /* check if kernel address */
+       cmplw   0,r3,r1
+       mfspr   r2,SPRN_SPRG3
+       li      r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+       lwz     r2,PGDIR(r2)
+       blt+    112f
+       lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
+       addi    r2,r2,swapper_pg_dir@l  /* kernel page table */
+       mfspr   r1,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
+       rlwinm  r1,r1,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
+112:   tophys(r2,r2)
+       rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
+       lwz     r2,0(r2)                /* get pmd entry */
+       rlwinm. r2,r2,0,0,19            /* extract address of pte page */
+       beq-    DataAddressInvalid      /* return if no mapping */
+       rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
+       lwz     r3,0(r2)                /* get linux-style pte */
+       andc.   r1,r1,r3                /* check access & ~permission */
+       bne-    DataAddressInvalid      /* return if access not permitted */
+       ori     r3,r3,_PAGE_ACCESSED    /* set _PAGE_ACCESSED in pte */
+       /*
+        * NOTE! We are assuming this is not an SMP system, otherwise
+        * we would need to update the pte atomically with lwarx/stwcx.
+        */
+       stw     r3,0(r2)                /* update PTE (accessed bit) */
+       /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwinm  r1,r3,32-10,31,31       /* _PAGE_RW -> PP lsb */
+       rlwinm  r2,r3,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
+       and     r1,r1,r2                /* writable if _RW and _DIRTY */
+       rlwimi  r3,r3,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r3,r3,32-1,31,31        /* _PAGE_USER -> PP lsb */
+       ori     r1,r1,0xe14             /* clear out reserved bits and M */
+       andc    r1,r3,r1                /* PP = user? (rw&dirty? 2: 3): 0 */
+       mtspr   SPRN_RPA,r1             /* hardware PTE word for the tlbld */
+       mfspr   r3,SPRN_DMISS
+       tlbld   r3                      /* load the DTLB entry */
+       mfspr   r3,SPRN_SRR1            /* Need to restore CR0 */
+       mtcrf   0x80,r3
+       rfi
+DataAddressInvalid:
+       mfspr   r3,SPRN_SRR1
+       rlwinm  r1,r3,9,6,6     /* Get load/store bit */
+       addis   r1,r1,0x2000
+       mtspr   SPRN_DSISR,r1
+       mtctr   r0              /* Restore CTR */
+       andi.   r2,r3,0xFFFF    /* Clear upper bits of SRR1 */
+       mtspr   SPRN_SRR1,r2
+       mfspr   r1,SPRN_DMISS   /* Get failing address */
+       rlwinm. r2,r2,0,31,31   /* Check for little endian access */
+       beq     20f             /* Jump if big endian */
+       xori    r1,r1,3
+20:    mtspr   SPRN_DAR,r1     /* Set fault address */
+       mfmsr   r0              /* Restore "normal" registers */
+       xoris   r0,r0,MSR_TGPR>>16      /* leave TGPR mode (normal GPRs again) */
+       mtcrf   0x80,r3         /* Restore CR0 */
+       mtmsr   r0
+       b       DataAccess
+
+/*
+ * Handle TLB miss for DATA Store on 603/603e
+ */
+       . = 0x1200
+DataStoreTLBMiss:
+/*
+ * r0: stored ctr
+ * r1: linux style pte ( later becomes ppc hardware pte )
+ * r2: ptr to linux-style pte
+ * r3: scratch
+ */
+       mfctr   r0                      /* save ctr (see register map above) */
+       /* Get PTE (linux-style) and check access */
+       mfspr   r3,SPRN_DMISS
+       lis     r1,KERNELBASE@h         /* check if kernel address */
+       cmplw   0,r3,r1
+       mfspr   r2,SPRN_SPRG3
+       li      r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
+       lwz     r2,PGDIR(r2)
+       blt+    112f
+       lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
+       addi    r2,r2,swapper_pg_dir@l  /* kernel page table */
+       mfspr   r1,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
+       rlwinm  r1,r1,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
+112:   tophys(r2,r2)
+       rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
+       lwz     r2,0(r2)                /* get pmd entry */
+       rlwinm. r2,r2,0,0,19            /* extract address of pte page */
+       beq-    DataAddressInvalid      /* return if no mapping */
+       rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
+       lwz     r3,0(r2)                /* get linux-style pte */
+       andc.   r1,r1,r3                /* check access & ~permission */
+       bne-    DataAddressInvalid      /* return if access not permitted */
+       ori     r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
+       /*
+        * NOTE! We are assuming this is not an SMP system, otherwise
+        * we would need to update the pte atomically with lwarx/stwcx.
+        */
+       stw     r3,0(r2)                /* update PTE (accessed/dirty bits) */
+       /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwimi  r3,r3,32-1,30,30        /* _PAGE_USER -> PP msb */
+       li      r1,0xe15                /* clear out reserved bits and M */
+       andc    r1,r3,r1                /* PP = user? 2: 0 */
+       mtspr   SPRN_RPA,r1             /* hardware PTE word for the tlbld */
+       mfspr   r3,SPRN_DMISS
+       tlbld   r3                      /* load the DTLB entry */
+       mfspr   r3,SPRN_SRR1            /* Need to restore CR0 */
+       mtcrf   0x80,r3
+       rfi
+
+#ifndef CONFIG_ALTIVEC
+#define altivec_assist_exception       unknown_exception       /* no AltiVec configured */
+#endif
+
+       EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
+       EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
+       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
+       EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
+       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
+       EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
+
+       .globl mol_trampoline
+       .set mol_trampoline, i0x2f00    /* i0x2f00 = handler words of the 0x2f00 vector */
+
+       . = 0x3000                      /* remapped 0xf00/0xf20 traps live here */
+
+AltiVecUnavailable:
+       EXCEPTION_PROLOG
+#ifdef CONFIG_ALTIVEC
+       bne     load_up_altivec         /* if from user, just load it up */
+#endif /* CONFIG_ALTIVEC */
+       EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
+
+#ifdef CONFIG_ALTIVEC
+/* Note that the AltiVec support is closely modeled after the FP
+ * support.  Changes to one are likely to be applicable to the
+ * other!  */
+load_up_altivec:
+/*
+ * Disable AltiVec for the task which had AltiVec previously,
+ * and save its AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ * On SMP we know the AltiVec units are free, since we give it up every
+ * switch.  -- Kumar
+ */
+       mfmsr   r5
+       oris    r5,r5,MSR_VEC@h
+       MTMSRD(r5)                      /* enable use of AltiVec now */
+       isync
+/*
+ * For SMP, we don't do lazy AltiVec switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altivec in switch_to.
+ */
+#ifndef CONFIG_SMP
+       tophys(r6,0)                    /* get __pa constant */
+       addis   r3,r6,last_task_used_altivec@ha
+       lwz     r4,last_task_used_altivec@l(r3)
+       cmpwi   0,r4,0
+       beq     1f                      /* no previous AltiVec owner */
+       add     r4,r4,r6
+       addi    r4,r4,THREAD    /* want THREAD of last_task_used_altivec */
+       SAVE_32VRS(0,r10,r4)
+       mfvscr  vr0
+       li      r10,THREAD_VSCR
+       stvx    vr0,r10,r4              /* save VSCR as well */
+       lwz     r5,PT_REGS(r4)
+       add     r5,r5,r6
+       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r10,MSR_VEC@h
+       andc    r4,r4,r10       /* disable altivec for previous task */
+       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+       /* enable use of AltiVec after return */
+       oris    r9,r9,MSR_VEC@h
+       mfspr   r5,SPRN_SPRG3           /* current task's THREAD (phys) */
+       li      r4,1
+       li      r10,THREAD_VSCR
+       stw     r4,THREAD_USED_VR(r5)   /* set the thread's used_vr flag */
+       lvx     vr0,r10,r5
+       mtvscr  vr0                     /* restore this task's VSCR */
+       REST_32VRS(0,r10,r5)
+#ifndef CONFIG_SMP
+       subi    r4,r5,THREAD
+       sub     r4,r4,r6
+       stw     r4,last_task_used_altivec@l(r3)
+#endif /* CONFIG_SMP */
+       /* restore registers and return */
+       /* we haven't used ctr or xer or lr */
+       b       fast_exception_return
+
+/*
+ * AltiVec unavailable trap from kernel - print a message, but let
+ * the task use AltiVec in the kernel until it returns to user mode.
+ */
+KernelAltiVec:
+       lwz     r3,_MSR(r1)
+       oris    r3,r3,MSR_VEC@h
+       stw     r3,_MSR(r1)     /* enable use of AltiVec after return */
+       lis     r3,87f@h        /* r3 = format string for printk below */
+       ori     r3,r3,87f@l
+       mr      r4,r2           /* current */
+       lwz     r5,_NIP(r1)     /* faulting PC, for the "pc=%x" field */
+       bl      printk
+       b       ret_from_except
+87:    .string "AltiVec used in kernel  (task=%p, pc=%x)  \n"
+       .align  4,0
+
+/*
+ * giveup_altivec(tsk)
+ * Disable AltiVec for the task given as the argument,
+ * and save the AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ */
+
+       .globl  giveup_altivec
+giveup_altivec:
+       mfmsr   r5
+       oris    r5,r5,MSR_VEC@h
+       SYNC
+       MTMSRD(r5)                      /* enable use of AltiVec now */
+       isync
+       cmpwi   0,r3,0                  /* r3 = task to give up AltiVec for */
+       beqlr-                          /* if no previous owner, done */
+       addi    r3,r3,THREAD            /* want THREAD of task */
+       lwz     r5,PT_REGS(r3)
+       cmpwi   0,r5,0                  /* CR0 := (pt_regs == NULL), tested at 1f */
+       SAVE_32VRS(0, r4, r3)
+       mfvscr  vr0                     /* save VSCR along with the VRs */
+       li      r4,THREAD_VSCR
+       stvx    vr0,r4,r3
+       beq     1f                      /* skip MSR update if no pt_regs */
+       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r3,MSR_VEC@h
+       andc    r4,r4,r3                /* disable AltiVec for previous task */
+       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+       li      r5,0
+       lis     r4,last_task_used_altivec@ha
+       stw     r5,last_task_used_altivec@l(r4) /* no lazy owner any more */
+#endif /* CONFIG_SMP */
+       blr
+#endif /* CONFIG_ALTIVEC */
+
+/*
+ * This code is jumped to from the startup code to copy
+ * the kernel image to physical address 0.
+ */
+relocate_kernel:
+       /* r26 = kernel toc/base as set by caller; r3/r6 select phys 0 as dest */
+       addis   r9,r26,klimit@ha        /* fetch klimit */
+       lwz     r25,klimit@l(r9)
+       addis   r25,r25,-KERNELBASE@h   /* r25 = image size in bytes (phys) */
+       li      r3,0                    /* Destination base address */
+       li      r6,0                    /* Destination offset */
+       li      r5,0x4000               /* # bytes of memory to copy */
+       bl      copy_and_flush          /* copy the first 0x4000 bytes */
+       addi    r0,r3,4f@l              /* jump to the address of 4f */
+       mtctr   r0                      /* in copy and do the rest. */
+       bctr                            /* jump to the copy */
+4:     mr      r5,r25                  /* now executing from the copy itself */
+       bl      copy_and_flush          /* copy the rest */
+       b       turn_on_mmu
+
+/*
+ * Copy routine used to copy the kernel to start at physical address 0
+ * and flush and invalidate the caches as needed.
+ * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
+ * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
+ */
+_GLOBAL(copy_and_flush)
+       addi    r5,r5,-4                /* pre-bias limit/offset; loop below */
+       addi    r6,r6,-4                /* uses pre-increment addressing */
+4:     li      r0,L1_CACHE_BYTES/4     /* words per cache line */
+       mtctr   r0
+3:     addi    r6,r6,4                 /* copy a cache line */
+       lwzx    r0,r6,r4
+       stwx    r0,r6,r3
+       bdnz    3b
+       dcbst   r6,r3                   /* write it to memory */
+       sync
+       icbi    r6,r3                   /* flush the icache line */
+       cmplw   0,r6,r5
+       blt     4b
+       sync                            /* additional sync needed on g4 */
+       isync
+       addi    r5,r5,4                 /* undo the pre-bias before return */
+       addi    r6,r6,4
+       blr
+
+#ifdef CONFIG_APUS
+/*
+ * On APUS the physical base address of the kernel is not known at compile
+ * time, which means the __pa/__va constants used are incorrect. In the
+ * __init section is recorded the virtual addresses of instructions using
+ * these constants, so all that has to be done is fix these before
+ * continuing the kernel boot.
+ *
+ * r4 = The physical address of the kernel base.
+ */
+fix_mem_constants:
+       /* Patch the immediate halves of __pa/__va instructions recorded in
+        * the __vtop/__ptov tables, since the real kernel base (r4) is only
+        * known at run time on APUS.
+        */
+       mr      r10,r4
+       addis   r10,r10,-KERNELBASE@h    /* virt_to_phys constant */
+       neg     r11,r10                  /* phys_to_virt constant */
+
+       lis     r12,__vtop_table_begin@h
+       ori     r12,r12,__vtop_table_begin@l
+       add     r12,r12,r10              /* table begin phys address */
+       lis     r13,__vtop_table_end@h
+       ori     r13,r13,__vtop_table_end@l
+       add     r13,r13,r10              /* table end phys address */
+       subi    r12,r12,4
+       subi    r13,r13,4
+1:     lwzu    r14,4(r12)               /* virt address of instruction */
+       add     r14,r14,r10              /* phys address of instruction */
+       lwz     r15,0(r14)               /* instruction, now insert top */
+       rlwimi  r15,r10,16,16,31         /* half of vp const in low half */
+       stw     r15,0(r14)               /* of instruction and restore. */
+       dcbst   r0,r14                   /* write it to memory */
+       sync
+       icbi    r0,r14                   /* flush the icache line */
+       cmpw    r12,r13
+       bne     1b
+       sync                            /* additional sync needed on g4 */
+       isync
+
+/*
+ * Map the memory where the exception handlers will
+ * be copied to when hash constants have been patched.
+ */
+#ifdef CONFIG_APUS_FAST_EXCEPT
+       lis     r8,0xfff0
+#else
+       lis     r8,0
+#endif
+       ori     r8,r8,0x2               /* 128KB, supervisor */
+       mtspr   SPRN_DBAT3U,r8
+       mtspr   SPRN_DBAT3L,r8
+
+       /* Same patch loop again, for the phys-to-virt table */
+       lis     r12,__ptov_table_begin@h
+       ori     r12,r12,__ptov_table_begin@l
+       add     r12,r12,r10              /* table begin phys address */
+       lis     r13,__ptov_table_end@h
+       ori     r13,r13,__ptov_table_end@l
+       add     r13,r13,r10              /* table end phys address */
+       subi    r12,r12,4
+       subi    r13,r13,4
+1:     lwzu    r14,4(r12)               /* virt address of instruction */
+       add     r14,r14,r10              /* phys address of instruction */
+       lwz     r15,0(r14)               /* instruction, now insert top */
+       rlwimi  r15,r11,16,16,31         /* half of pv const in low half*/
+       stw     r15,0(r14)               /* of instruction and restore. */
+       dcbst   r0,r14                   /* write it to memory */
+       sync
+       icbi    r0,r14                   /* flush the icache line */
+       cmpw    r12,r13
+       bne     1b
+
+       sync                            /* additional sync needed on g4 */
+       isync                           /* No speculative loading until now */
+       blr
+
+/***********************************************************************
+ *  Please note that on APUS the exception handlers are located at the
+ *  physical address 0xfff0000. For this reason, the exception handlers
+ *  cannot use relative branches to access the code below.
+ ***********************************************************************/
+#endif /* CONFIG_APUS */
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_GEMINI
+       .globl  __secondary_start_gemini
+__secondary_start_gemini:
+       /* Gemini: flash-invalidate and disable the icache (clear HID0_ICE,
+        * set HID0_ICFI) before joining the common secondary-CPU path.
+        */
+        mfspr   r4,SPRN_HID0
+        ori     r4,r4,HID0_ICFI
+        li      r3,0
+        ori     r3,r3,HID0_ICE
+        andc    r4,r4,r3
+        mtspr   SPRN_HID0,r4
+        sync
+        b       __secondary_start
+#endif /* CONFIG_GEMINI */
+
+       .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+       li      r24,0           /* r24 = this CPU's number */
+       b       1f
+       li      r24,1
+       b       1f
+       li      r24,2
+       b       1f
+       li      r24,3
+1:
+       /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
+          set to map the 0xf0000000 - 0xffffffff region */
+       mfmsr   r0
+       rlwinm  r0,r0,0,28,26           /* clear DR (0x10) */
+       SYNC
+       mtmsr   r0
+       isync
+
+       .globl  __secondary_start
+__secondary_start:
+       /* Copy some CPU settings from CPU 0 */
+       bl      __restore_cpu_setup
+
+       lis     r3,-KERNELBASE@h        /* r3 = phys offset for callees */
+       mr      r4,r24
+       bl      call_setup_cpu          /* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+       lis     r3,-KERNELBASE@h
+       bl      init_idle_6xx
+#endif /* CONFIG_6xx */
+
+       /* get current_thread_info and current */
+       lis     r1,secondary_ti@ha
+       tophys(r1,r1)
+       lwz     r1,secondary_ti@l(r1)
+       tophys(r2,r1)
+       lwz     r2,TI_TASK(r2)
+
+       /* stack */
+       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       li      r0,0
+       tophys(r3,r1)
+       stw     r0,0(r3)                /* NULL back-chain terminates stack */
+
+       /* load up the MMU */
+       bl      load_up_mmu
+
+       /* ptr to phys current thread */
+       tophys(r4,r2)
+       addi    r4,r4,THREAD    /* phys address of our thread_struct */
+       CLR_TOP32(r4)
+       mtspr   SPRN_SPRG3,r4
+       li      r3,0
+       mtspr   SPRN_SPRG2,r3   /* 0 => not in RTAS */
+
+       /* enable MMU and jump to start_secondary */
+       li      r4,MSR_KERNEL
+       FIX_SRR1(r4,r5)
+       lis     r3,start_secondary@h
+       ori     r3,r3,start_secondary@l
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       SYNC
+       RFI                     /* turn on MMU, enter start_secondary */
+#endif /* CONFIG_SMP */
+
+/*
+ * Those generic dummy functions are kept for CPUs not
+ * included in CONFIG_6xx
+ */
+#if !defined(CONFIG_6xx)
+_GLOBAL(__save_cpu_setup)              /* no-op stub for non-6xx CPUs */
+       blr
+_GLOBAL(__restore_cpu_setup)           /* no-op stub for non-6xx CPUs */
+       blr
+#endif /* !defined(CONFIG_6xx) */
+
+
+/*
+ * Load stuff into the MMU.  Intended to be called with
+ * IR=0 and DR=0.
+ */
+load_up_mmu:
+       sync                    /* Force all PTE updates to finish */
+       isync
+       tlbia                   /* Clear all TLB entries */
+       sync                    /* wait for tlbia/tlbie to finish */
+       TLBSYNC                 /* ... on all CPUs */
+       /* Load the SDR1 register (hash table base & size) */
+       lis     r6,_SDR1@ha
+       tophys(r6,r6)
+       lwz     r6,_SDR1@l(r6)
+       mtspr   SPRN_SDR1,r6
+       li      r0,16           /* load up segment register values */
+       mtctr   r0              /* for context 0 */
+       lis     r3,0x2000       /* Ku = 1, VSID = 0 */
+       li      r4,0
+3:     mtsrin  r3,r4
+       addi    r3,r3,0x111     /* increment VSID */
+       addis   r4,r4,0x1000    /* address of next segment */
+       bdnz    3b
+
+/* Load the BAT registers with the values set up by MMU_init.
+   MMU_init takes care of whether we're on a 601 or not. */
+       mfpvr   r3
+       srwi    r3,r3,16        /* PVR version field (unused below; LOAD_BAT
+                                  handles the 601 case itself) */
+       cmpwi   r3,1
+       lis     r3,BATS@ha
+       addi    r3,r3,BATS@l
+       tophys(r3,r3)
+       LOAD_BAT(0,r3,r4,r5)
+       LOAD_BAT(1,r3,r4,r5)
+       LOAD_BAT(2,r3,r4,r5)
+       LOAD_BAT(3,r3,r4,r5)
+
+       blr
+
+/*
+ * This is where the main kernel code starts.
+ */
+start_here:
+       /* ptr to current */
+       lis     r2,init_task@h
+       ori     r2,r2,init_task@l
+       /* Set up for using our exception vectors */
+       /* ptr to phys current thread */
+       tophys(r4,r2)
+       addi    r4,r4,THREAD    /* init task's THREAD */
+       CLR_TOP32(r4)
+       mtspr   SPRN_SPRG3,r4
+       li      r3,0
+       mtspr   SPRN_SPRG2,r3   /* 0 => not in RTAS */
+
+       /* stack */
+       lis     r1,init_thread_union@ha
+       addi    r1,r1,init_thread_union@l
+       li      r0,0
+       stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+/*
+ * Do early platform-specific initialization,
+ * and set up the MMU.
+ */
+       mr      r3,r31          /* saved boot parameters (see _start) */
+       mr      r4,r30
+       bl      machine_init
+       bl      MMU_init
+
+#ifdef CONFIG_APUS
+       /* Copy exception code to exception vector base on APUS. */
+       lis     r4,KERNELBASE@h
+#ifdef CONFIG_APUS_FAST_EXCEPT
+       lis     r3,0xfff0               /* Copy to 0xfff00000 */
+#else
+       lis     r3,0                    /* Copy to 0x00000000 */
+#endif
+       li      r5,0x4000               /* # bytes of memory to copy */
+       li      r6,0
+       bl      copy_and_flush          /* copy the first 0x4000 bytes */
+#endif  /* CONFIG_APUS */
+
+/*
+ * Go back to running unmapped so we can load up new values
+ * for SDR1 (hash table pointer) and the segment registers
+ * and change to using our exception vectors.
+ */
+       lis     r4,2f@h
+       ori     r4,r4,2f@l
+       tophys(r4,r4)
+       li      r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+       FIX_SRR1(r3,r5)
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       SYNC
+       RFI                     /* continue at 2: with translation off */
+/* Load up the kernel context */
+2:     bl      load_up_mmu
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Add helper information for the Abatron bdiGDB debugger.
+        * We do this here because we know the mmu is disabled, and
+        * will be enabled for real in just a few instructions.
+        */
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       stw     r5, 0xf0(r0)    /* This must match your Abatron config */
+       lis     r6, swapper_pg_dir@h
+       ori     r6, r6, swapper_pg_dir@l
+       tophys(r5, r5)
+       stw     r6, 0(r5)
+#endif /* CONFIG_BDI_SWITCH */
+
+/* Now turn on the MMU for real! */
+       li      r4,MSR_KERNEL
+       FIX_SRR1(r4,r5)
+       lis     r3,start_kernel@h
+       ori     r3,r3,start_kernel@l
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       SYNC
+       RFI                     /* enable MMU, enter start_kernel */
+
+/*
+ * Set up the segment registers for a new context.
+ */
+/* set_context(context, pgdir): load segment registers for a new context */
+_GLOBAL(set_context)
+       mulli   r3,r3,897       /* multiply context by skew factor */
+       rlwinm  r3,r3,4,8,27    /* VSID = (context & 0xfffff) << 4 */
+       addis   r3,r3,0x6000    /* Set Ks, Ku bits */
+       li      r0,NUM_USER_SEGMENTS
+       mtctr   r0
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Context switch the PTE pointer for the Abatron BDI2000.
+        * The PGDIR is passed as second argument.
+        */
+       lis     r5, KERNELBASE@h
+       lwz     r5, 0xf0(r5)    /* abatron_pteptrs, stored by start_here */
+       stw     r4, 0x4(r5)
+#endif
+       li      r4,0
+       isync
+3:
+       mtsrin  r3,r4
+       addi    r3,r3,0x111     /* next VSID */
+       rlwinm  r3,r3,0,8,3     /* clear out any overflow from VSID field */
+       addis   r4,r4,0x1000    /* address of next segment */
+       bdnz    3b
+       sync
+       isync
+       blr
+
+/*
+ * An undocumented "feature" of 604e requires that the v bit
+ * be cleared before changing BAT values.
+ *
+ * Also, newer IBM firmware does not clear bat3 and 4 so
+ * this makes sure it's done.
+ *  -- Cort
+ */
+clear_bats:
+       li      r10,0
+       mfspr   r9,SPRN_PVR
+       rlwinm  r9,r9,16,16,31          /* r9 = 1 for 601, 4 for 604 */
+       cmpwi   r9, 1
+       beq     1f                      /* 601: skip DBATs (it has none) */
+
+       mtspr   SPRN_DBAT0U,r10
+       mtspr   SPRN_DBAT0L,r10
+       mtspr   SPRN_DBAT1U,r10
+       mtspr   SPRN_DBAT1L,r10
+       mtspr   SPRN_DBAT2U,r10
+       mtspr   SPRN_DBAT2L,r10
+       mtspr   SPRN_DBAT3U,r10
+       mtspr   SPRN_DBAT3L,r10
+1:
+       mtspr   SPRN_IBAT0U,r10
+       mtspr   SPRN_IBAT0L,r10
+       mtspr   SPRN_IBAT1U,r10
+       mtspr   SPRN_IBAT1L,r10
+       mtspr   SPRN_IBAT2U,r10
+       mtspr   SPRN_IBAT2L,r10
+       mtspr   SPRN_IBAT3U,r10
+       mtspr   SPRN_IBAT3L,r10
+BEGIN_FTR_SECTION
+       /* Here's a tweak: at this point, CPU setup have
+        * not been called yet, so HIGH_BAT_EN may not be
+        * set in HID0 for the 745x processors. However, it
+        * seems that doesn't affect our ability to actually
+        * write to these SPRs.
+        */
+       mtspr   SPRN_DBAT4U,r10
+       mtspr   SPRN_DBAT4L,r10
+       mtspr   SPRN_DBAT5U,r10
+       mtspr   SPRN_DBAT5L,r10
+       mtspr   SPRN_DBAT6U,r10
+       mtspr   SPRN_DBAT6L,r10
+       mtspr   SPRN_DBAT7U,r10
+       mtspr   SPRN_DBAT7L,r10
+       mtspr   SPRN_IBAT4U,r10
+       mtspr   SPRN_IBAT4L,r10
+       mtspr   SPRN_IBAT5U,r10
+       mtspr   SPRN_IBAT5L,r10
+       mtspr   SPRN_IBAT6U,r10
+       mtspr   SPRN_IBAT6L,r10
+       mtspr   SPRN_IBAT7U,r10
+       mtspr   SPRN_IBAT7L,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+       blr
+
+/* Invalidate the TLB, one 4KB page at a time, from 0x40000 down to 0 */
+flush_tlbs:
+       lis     r10, 0x40
+1:     addic.  r10, r10, -0x1000
+       tlbie   r10
+       blt     1b
+       sync
+       blr
+
+/* Turn translation off and resume at __after_mmu_off (r3 = phys of _start) */
+mmu_off:
+       addi    r4, r3, __after_mmu_off - _start
+       mfmsr   r3
+       andi.   r0,r3,MSR_DR|MSR_IR             /* MMU enabled? */
+       beqlr                                   /* already off, just return */
+       andc    r3,r3,r0
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       sync
+       RFI
+
+/*
+ * Use the first pair of BAT registers to map the 1st 16MB
+ * of RAM to KERNELBASE.  From this point on we can't safely
+ * call OF any more.
+ */
+initial_bats:
+       lis     r11,KERNELBASE@h
+       mfspr   r9,SPRN_PVR
+       rlwinm  r9,r9,16,16,31          /* r9 = 1 for 601, 4 for 604 */
+       cmpwi   0,r9,1
+       bne     4f                      /* not a 601: use the 6xx format */
+       ori     r11,r11,4               /* set up BAT registers for 601 */
+       li      r8,0x7f                 /* valid, block length = 8MB */
+       oris    r9,r11,0x800000@h       /* set up BAT reg for 2nd 8M */
+       oris    r10,r8,0x800000@h       /* set up BAT reg for 2nd 8M */
+       mtspr   SPRN_IBAT0U,r11         /* N.B. 601 has valid bit in */
+       mtspr   SPRN_IBAT0L,r8          /* lower BAT register */
+       mtspr   SPRN_IBAT1U,r9
+       mtspr   SPRN_IBAT1L,r10
+       isync
+       blr
+
+4:     tophys(r8,r11)                  /* r8 = physical base for BAT lower */
+#ifdef CONFIG_SMP
+       ori     r8,r8,0x12              /* R/W access, M=1 */
+#else
+       ori     r8,r8,2                 /* R/W access */
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_APUS
+       ori     r11,r11,BL_8M<<2|0x2    /* set up 8MB BAT registers for 604 */
+#else
+       ori     r11,r11,BL_256M<<2|0x2  /* set up BAT registers for 604 */
+#endif /* CONFIG_APUS */
+
+       mtspr   SPRN_DBAT0L,r8          /* N.B. 6xx (not 601) have valid */
+       mtspr   SPRN_DBAT0U,r11         /* bit in upper BAT register */
+       mtspr   SPRN_IBAT0L,r8
+       mtspr   SPRN_IBAT0U,r11
+       isync
+       blr
+
+
+#ifdef CONFIG_8260
+/* Jump into the system reset for the rom.
+ * We first disable the MMU, and then jump to the ROM reset address.
+ *
+ * r3 is the board info structure, r4 is the location for starting.
+ * I use this for building a small kernel that can load other kernels,
+ * rather than trying to write or rely on a rom monitor that can tftp load.
+ */
+       .globl  m8260_gorom
+m8260_gorom:
+       mfmsr   r0
+       rlwinm  r0,r0,0,17,15   /* clear MSR_EE in r0 */
+       sync
+       mtmsr   r0
+       sync
+       /* Disable both caches before handing control to the ROM */
+       mfspr   r11, SPRN_HID0
+       lis     r10, 0
+       ori     r10,r10,HID0_ICE|HID0_DCE
+       andc    r11, r11, r10
+       mtspr   SPRN_HID0, r11
+       isync
+       li      r5, MSR_ME|MSR_RI
+       lis     r6,2f@h
+       addis   r6,r6,-KERNELBASE@h     /* physical address of 2: */
+       ori     r6,r6,2f@l
+       mtspr   SPRN_SRR0,r6
+       mtspr   SPRN_SRR1,r5
+       isync
+       sync
+       rfi
+2:
+       mtlr    r4              /* r4 = ROM start address from caller */
+       blr
+#endif
+
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the data segment,
+ * which is page-aligned.
+ */
+       .data
+       .globl  sdata
+sdata:
+       /* Page of zeroes, e.g. for zero-fill mappings */
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  4096
+
+       /* Kernel's top-level page table (one page) */
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  4096
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * Used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+       .globl  cmd_line
+cmd_line:
+       .space  512
+
+       /* Per-vector intercept entry points (i0xNNN labels defined above) */
+       .globl intercept_table
+intercept_table:
+       .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
+       .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
+       .long 0, 0, 0, i0x1300, 0, 0, 0, 0
+       .long 0, 0, 0, 0, 0, 0, 0, 0
+       .long 0, 0, 0, 0, 0, 0, 0, 0
+       .long 0, 0, 0, 0, 0, 0, 0, 0
+
+/* Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+       .space  8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
new file mode 100644 (file)
index 0000000..8b49679
--- /dev/null
@@ -0,0 +1,782 @@
+/*
+ * arch/ppc/kernel/head_44x.S
+ *
+ * Kernel execution entry point code.
+ *
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *     PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ *     Author: MontaVista Software, Inc.
+ *             frank_rowand@mvista.com or source@mvista.com
+ *             debbie_chu@mvista.com
+ *    Copyright 2002-2005 MontaVista Software, Inc.
+ *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/ibm4xx.h>
+#include <asm/ibm44x.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include "head_booke.h"
+
+
+/* As with the other PowerPC ports, it is expected that when code
+ * execution begins here, the following registers contain valid, yet
+ * optional, information:
+ *
+ *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+ *   r4 - Starting address of the init RAM disk
+ *   r5 - Ending address of the init RAM disk
+ *   r6 - Start of kernel command line string (e.g. "mem=128")
+ *   r7 - End of kernel command line string
+ *
+ */
+       .text
+_GLOBAL(_stext)
+_GLOBAL(_start)
+       /*
+        * Reserve a word at a fixed location to store the address
+        * of abatron_pteptrs
+        */
+       nop
+/*
+ * Save parameters we are passed
+ */
+       mr      r31,r3          /* board info structure pointer */
+       mr      r30,r4          /* initrd start */
+       mr      r29,r5          /* initrd end */
+       mr      r28,r6          /* command line start */
+       mr      r27,r7          /* command line end */
+       li      r24,0           /* CPU number */
+
+/*
+ * Set up the initial MMU state
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from.  We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ *      determine which physical region we are located
+ *      in.  This can be used to determine where in RAM
+ *      (on a shared CPU system) or PCI memory space
+ *      (on a DRAMless system) we are located.
+ *       For now, we assume a perfect world which means
+ *      we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ * (r23 holds our TLB entry index throughout.)
+ */
+       /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+       mfspr   r3,SPRN_PID                     /* Get PID */
+       mfmsr   r4                              /* Get MSR */
+       andi.   r4,r4,MSR_IS@l                  /* TS=1? */
+       beq     wmmucr                          /* If not, leave STS=0 */
+       oris    r3,r3,PPC44x_MMUCR_STS@h        /* Set STS=1 */
+wmmucr:        mtspr   SPRN_MMUCR,r3                   /* Put MMUCR */
+       sync
+
+       bl      invstr                          /* Find our address */
+invstr:        mflr    r5                              /* Make it accessible */
+       tlbsx   r23,0,r5                        /* Find entry we are in */
+       li      r4,0                            /* Start at TLB entry 0 */
+       li      r3,0                            /* Set PAGEID inval value */
+1:     cmpw    r23,r4                          /* Is this our entry? */
+       beq     skpinv                          /* If so, skip the inval */
+       tlbwe   r3,r4,PPC44x_TLB_PAGEID         /* If not, inval the entry */
+skpinv:        addi    r4,r4,1                         /* Increment */
+       cmpwi   r4,64                           /* Are we done? */
+       bne     1b                              /* If not, repeat */
+       isync                                   /* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+       lis     r3,KERNELBASE@h         /* Load the kernel virtual address */
+       ori     r3,r3,KERNELBASE@l
+
+       /* Kernel is at the base of RAM */
+       li r4, 0                        /* Load the kernel physical address */
+
+       /* Load the kernel PID = 0 */
+       li      r0,0
+       mtspr   SPRN_PID,r0
+       sync
+
+       /* Initialize MMUCR */
+       li      r5,0
+       mtspr   SPRN_MMUCR,r5
+       sync
+
+       /* pageid fields */
+       clrrwi  r3,r3,10                /* Mask off the effective page number */
+       ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+       /* xlat fields */
+       clrrwi  r4,r4,10                /* Mask off the real page number */
+                                       /* ERPN is 0 for first 4GB page */
+
+       /* attrib fields */
+       /* Added guarded bit to protect against speculative loads/stores */
+       li      r5,0
+       ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+        li      r0,63                    /* TLB slot 63 */
+
+       tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+       tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
+       tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
+       /* Force context change: rfi to 3f with the same MSR, so the new
+        * pinned mapping takes effect
+        */
+       mfmsr   r0
+       mtspr   SPRN_SRR1, r0
+       lis     r0,3f@h
+       ori     r0,r0,3f@l
+       mtspr   SPRN_SRR0,r0
+       sync
+       rfi
+
+       /* If necessary, invalidate original entry we used */
+3:     cmpwi   r23,63
+       beq     4f
+       li      r6,0
+       tlbwe   r6,r23,PPC44x_TLB_PAGEID
+       isync
+
+4:
+#ifdef CONFIG_SERIAL_TEXT_DEBUG
+       /*
+        * Add temporary UART mapping for early debug.
+        * We can map UART registers wherever we want as long as they don't
+        * interfere with other system mappings (e.g. with pinned entries).
+        * For an example of how we handle this - see ocotea.h.       --ebs
+        */
+       /* pageid fields */
+       lis     r3,UART0_IO_BASE@h
+       ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
+
+       /* xlat fields */
+       lis     r4,UART0_PHYS_IO_BASE@h         /* RPN depends on SoC */
+#ifndef CONFIG_440EP
+       ori     r4,r4,0x0001            /* ERPN is 1 for second 4GB page */
+#endif
+
+       /* attrib fields: write/read, cache-inhibited, guarded (device I/O) */
+       li      r5,0
+       ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
+
+        li      r0,0                    /* TLB slot 0 */
+
+       tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+       tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
+       tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
+       /* Force context change */
+       isync
+#endif /* CONFIG_SERIAL_TEXT_DEBUG */
+
+       /* Establish the interrupt vector offsets (IVORs point at the
+        * handler labels in interrupt_base below)
+        */
+       SET_IVOR(0,  CriticalInput);
+       SET_IVOR(1,  MachineCheck);
+       SET_IVOR(2,  DataStorage);
+       SET_IVOR(3,  InstructionStorage);
+       SET_IVOR(4,  ExternalInput);
+       SET_IVOR(5,  Alignment);
+       SET_IVOR(6,  Program);
+       SET_IVOR(7,  FloatingPointUnavailable);
+       SET_IVOR(8,  SystemCall);
+       SET_IVOR(9,  AuxillaryProcessorUnavailable);
+       SET_IVOR(10, Decrementer);
+       SET_IVOR(11, FixedIntervalTimer);
+       SET_IVOR(12, WatchdogTimer);
+       SET_IVOR(13, DataTLBError);
+       SET_IVOR(14, InstructionTLBError);
+       SET_IVOR(15, Debug);
+
+       /* Establish the interrupt vector base */
+       lis     r4,interrupt_base@h     /* IVPR only uses the high 16-bits */
+       mtspr   SPRN_IVPR,r4
+
+#ifdef CONFIG_440EP
+       /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
+       mfspr   r2,SPRN_CCR0
+       lis     r3,0xffef
+       ori     r3,r3,0xffff
+       and     r2,r2,r3
+       mtspr   SPRN_CCR0,r2
+       isync
+#endif
+
+       /*
+        * This is where the main kernel code starts.
+        */
+
+       /* ptr to current */
+       lis     r2,init_task@h
+       ori     r2,r2,init_task@l
+
+       /* ptr to current thread */
+       addi    r4,r2,THREAD    /* init task's THREAD */
+       mtspr   SPRN_SPRG3,r4
+
+       /* stack */
+       lis     r1,init_thread_union@h
+       ori     r1,r1,init_thread_union@l
+       li      r0,0
+       stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+       bl      early_init
+
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ */
+       mr      r3,r31
+       mr      r4,r30
+       mr      r5,r29
+       mr      r6,r28
+       mr      r7,r27
+       bl      machine_init
+       bl      MMU_init
+
+       /* Setup PTE pointers for the Abatron bdiGDB */
+       lis     r6, swapper_pg_dir@h
+       ori     r6, r6, swapper_pg_dir@l
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       lis     r4, KERNELBASE@h
+       ori     r4, r4, KERNELBASE@l
+       stw     r5, 0(r4)       /* Save abatron_pteptrs at a fixed location */
+       stw     r6, 0(r5)
+
+       /* Let's move on */
+       lis     r4,start_kernel@h
+       ori     r4,r4,start_kernel@l
+       lis     r3,MSR_KERNEL@h
+       ori     r3,r3,MSR_KERNEL@l
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       rfi                     /* change context and jump to start_kernel */
+
+/*
+ * Interrupt vector entry code
+ *
+ * The Book E MMUs are always on so we don't need to handle
+ * interrupts in real mode as with previous PPC processors. In
+ * this case we handle interrupts in the kernel virtual address
+ * space.
+ *
+ * Interrupt vectors are dynamically placed relative to the
+ * interrupt prefix as determined by the address of interrupt_base.
+ * The interrupt vectors offsets are programmed using the labels
+ * for each interrupt vector entry.
+ *
+ * Interrupt vectors must be aligned on a 16 byte boundary.
+ * We align on a 32 byte cache line boundary for good measure.
+ */
+
+/*
+ * interrupt_base: IVPR was loaded with this symbol's high half-word
+ * during setup above, so every vector below is addressed relative to
+ * this label via its IVOR offset.
+ */
+interrupt_base:
+       /* Critical Input Interrupt */
+       CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+
+       /* Machine Check Interrupt */
+#ifdef CONFIG_440A
+       /* 440A cores take machine checks via the mcheck (not critical)
+        * class, hence the separate prolog/save area. */
+       MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+#else
+       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+#endif
+
+       /* Data Storage Interrupt */
+       /*
+        * Fast path for write faults: if the access was a store
+        * (ESR[ST]) to a page that is present and _PAGE_RW, set
+        * DIRTY/ACCESSED/HWWRITE in the Linux PTE and rewrite the
+        * matching TLB entry's ATTRIB word in place.  Any other case
+        * bails out to data_access for the full C page-fault path.
+        * Scratch state lives in SPRG0/1/4W/5W (GPRs) and SPRG7W (CR).
+        */
+       START_EXCEPTION(DataStorage)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+       mtspr   SPRN_SPRG4W, r12
+       mtspr   SPRN_SPRG5W, r13
+       mfcr    r11
+       mtspr   SPRN_SPRG7W, r11
+
+       /*
+        * Check if it was a store fault, if not then bail
+        * because a user tried to access a kernel or
+        * read-protected page.  Otherwise, get the
+        * offending address and handle it.
+        */
+       mfspr   r10, SPRN_ESR
+       andis.  r10, r10, ESR_ST@h
+       beq     2f
+
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       cmplw   r10, r11
+       blt+    3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+
+       /* Kernel mapping: search with TID = 0 */
+       mfspr   r12,SPRN_MMUCR
+       rlwinm  r12,r12,0,0,23          /* Clear TID */
+
+       b       4f
+
+       /* Get the PGD for the current thread */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+
+       /* Load PID into MMUCR TID */
+       mfspr   r12,SPRN_MMUCR          /* Get MMUCR */
+       mfspr   r13,SPRN_PID            /* Get PID */
+       rlwimi  r12,r13,0,24,31         /* Set TID */
+
+4:
+       mtspr   SPRN_MMUCR,r12
+
+       rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
+       lwzx    r11, r12, r11           /* Get pgd/pmd entry */
+       rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
+       beq     2f                      /* Bail if no table */
+
+       rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
+       lwz     r11, 4(r12)             /* Get pte entry */
+
+       andi.   r13, r11, _PAGE_RW      /* Is it writeable? */
+       beq     2f                      /* Bail if not */
+
+       /* Update 'changed'.
+       */
+       ori     r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+       stw     r11, 4(r12)             /* Update Linux page table */
+
+       /* Build the TLB ATTRIB permission bits from the PTE flags. */
+       li      r13, PPC44x_TLB_SR@l    /* Set SR */
+       rlwimi  r13, r11, 29, 29, 29    /* SX = _PAGE_HWEXEC */
+       rlwimi  r13, r11, 0, 30, 30     /* SW = _PAGE_RW */
+       rlwimi  r13, r11, 29, 28, 28    /* UR = _PAGE_USER */
+       rlwimi  r12, r11, 31, 26, 26    /* (_PAGE_USER>>1)->r12 */
+       rlwimi  r12, r11, 29, 30, 30    /* (_PAGE_USER>>3)->r12 */
+       and     r12, r12, r11           /* HWEXEC/RW & USER */
+       rlwimi  r13, r12, 0, 26, 26     /* UX = HWEXEC & USER */
+       rlwimi  r13, r12, 3, 27, 27     /* UW = RW & USER */
+
+       rlwimi  r11,r13,0,26,31         /* Insert static perms */
+
+       rlwinm  r11,r11,0,20,15         /* Clear U0-U3 */
+
+       /* find the TLB index that caused the fault.  It has to be here. */
+       tlbsx   r10, 0, r10
+
+       tlbwe   r11, r10, PPC44x_TLB_ATTRIB     /* Write ATTRIB */
+
+       /* Done...restore registers and get out of here.
+       */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       rfi                     /* Force context change */
+
+2:
+       /*
+        * The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       data_access
+
+       /* Instruction Storage Interrupt */
+       INSTRUCTION_STORAGE_EXCEPTION
+
+       /* External Input Interrupt */
+       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+
+       /* Alignment Interrupt */
+       ALIGNMENT_EXCEPTION
+
+       /* Program Interrupt */
+       PROGRAM_EXCEPTION
+
+       /* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+       FP_UNAVAILABLE_EXCEPTION
+#else
+       EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+#endif
+
+       /* System Call Interrupt */
+       START_EXCEPTION(SystemCall)
+       NORMAL_EXCEPTION_PROLOG
+       EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+
+       /* Auxiliary Processor Unavailable Interrupt
+        * (the label keeps the historical "Auxillary" spelling) */
+       EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+
+       /* Decrementer Interrupt */
+       DECREMENTER_EXCEPTION
+
+       /* Fixed Internal Timer Interrupt */
+       /* TODO: Add FIT support */
+       EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+
+       /* Watchdog Timer Interrupt */
+       /* TODO: Add watchdog support */
+#ifdef CONFIG_BOOKE_WDT
+       CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+#else
+       CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
+#endif
+
+       /* Data TLB Error Interrupt */
+       /*
+        * TLB miss on a data access: walk the Linux page tables and,
+        * if the PTE is present, mark it ACCESSED and branch to
+        * finish_tlb_load to install a new TLB entry.  A missing
+        * table or non-present page bails to data_access.
+        */
+       START_EXCEPTION(DataTLBError)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+       mtspr   SPRN_SPRG4W, r12
+       mtspr   SPRN_SPRG5W, r13
+       mfcr    r11
+       mtspr   SPRN_SPRG7W, r11
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       cmplw   r10, r11
+       blt+    3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+
+       /* Kernel mapping: install with TID = 0 */
+       mfspr   r12,SPRN_MMUCR
+       rlwinm  r12,r12,0,0,23          /* Clear TID */
+
+       b       4f
+
+       /* Get the PGD for the current thread */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+
+       /* Load PID into MMUCR TID */
+       mfspr   r12,SPRN_MMUCR
+       mfspr   r13,SPRN_PID            /* Get PID */
+       rlwimi  r12,r13,0,24,31         /* Set TID */
+
+4:
+       mtspr   SPRN_MMUCR,r12
+
+       rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
+       lwzx    r11, r12, r11           /* Get pgd/pmd entry */
+       rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
+       beq     2f                      /* Bail if no table */
+
+       rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
+       lwz     r11, 4(r12)             /* Get pte entry */
+       andi.   r13, r11, _PAGE_PRESENT /* Is the page present? */
+       beq     2f                      /* Bail if not present */
+
+       /* Update 'referenced' before installing the entry. */
+       ori     r11, r11, _PAGE_ACCESSED
+       stw     r11, 4(r12)
+
+        /* Jump to common tlb load */
+       b       finish_tlb_load
+
+2:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       data_access
+
+       /* Instruction TLB Error Interrupt */
+       /*
+        * Nearly the same as above, except we get our
+        * information from different registers and bailout
+        * to a different point: the faulting address comes from
+        * SRR0 (fetch address) rather than DEAR, and an unresolved
+        * miss is handed to InstructionStorage instead of data_access.
+        */
+       START_EXCEPTION(InstructionTLBError)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+       mtspr   SPRN_SPRG4W, r12
+       mtspr   SPRN_SPRG5W, r13
+       mfcr    r11
+       mtspr   SPRN_SPRG7W, r11
+       mfspr   r10, SPRN_SRR0          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       cmplw   r10, r11
+       blt+    3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+
+       /* Kernel mapping: install with TID = 0 */
+       mfspr   r12,SPRN_MMUCR
+       rlwinm  r12,r12,0,0,23          /* Clear TID */
+
+       b       4f
+
+       /* Get the PGD for the current thread */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+
+       /* Load PID into MMUCR TID */
+       mfspr   r12,SPRN_MMUCR
+       mfspr   r13,SPRN_PID            /* Get PID */
+       rlwimi  r12,r13,0,24,31         /* Set TID */
+
+4:
+       mtspr   SPRN_MMUCR,r12
+
+       rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
+       lwzx    r11, r12, r11           /* Get pgd/pmd entry */
+       rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
+       beq     2f                      /* Bail if no table */
+
+       rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
+       lwz     r11, 4(r12)             /* Get pte entry */
+       andi.   r13, r11, _PAGE_PRESENT /* Is the page present? */
+       beq     2f                      /* Bail if not present */
+
+       /* Update 'referenced' before installing the entry. */
+       ori     r11, r11, _PAGE_ACCESSED
+       stw     r11, 4(r12)
+
+       /* Jump to common TLB load point */
+       b       finish_tlb_load
+
+2:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       InstructionStorage
+
+       /* Debug Interrupt */
+       DEBUG_EXCEPTION
+
+/*
+ * Local functions
+ */
+       /*
+        * Data TLB exceptions will bail out to this point
+        * if they can't resolve the lightweight TLB fault.
+        * The Data Storage fast path above branches here too.
+        * Builds a normal exception frame and hands ESR (arg3) and
+        * DEAR (arg2) to the C handle_page_fault() routine.
+        */
+data_access:
+       NORMAL_EXCEPTION_PROLOG
+       mfspr   r5,SPRN_ESR             /* Grab the ESR, save it, pass arg3 */
+       stw     r5,_ESR(r11)
+       mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
+       EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+
+/*
+
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ *     r10 - EA of fault
+ *     r11 - available to use
+ *     r12 - Pointer to the 64-bit PTE
+ *     r13 - available to use
+ *     MMUCR - loaded with proper value when we get here
+ *     Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load:
+       /*
+        * We set execute, because we don't have the granularity to
+        * properly set this at the page level (Linux problem).
+        * If shared is set, we cause a zero PID->TID load.
+        * Many of these bits are software only.  Bits we don't set
+        * here we (properly should) assume have the appropriate value.
+        */
+
+       /* Load the next available TLB index */
+       lis     r13, tlb_44x_index@ha
+       lwz     r13, tlb_44x_index@l(r13)
+       /* Load the TLB high watermark */
+       lis     r11, tlb_44x_hwater@ha
+       lwz     r11, tlb_44x_hwater@l(r11)
+
+       /* Increment, rollover, and store TLB index.
+        * NOTE(review): indices above the watermark appear to be
+        * reserved (e.g. for pinned kernel mappings) and are never
+        * replaced here -- confirm against the 44x MMU init code.
+        */
+       addi    r13, r13, 1
+       cmpw    0, r13, r11                     /* reserve entries */
+       ble     7f
+       li      r13, 0
+7:
+       /* Store the next available TLB index */
+       lis     r11, tlb_44x_index@ha
+       stw     r13, tlb_44x_index@l(r11)
+
+       /* r12 points at a 64-bit PTE: word 0 holds the upper RPN bits,
+        * word 1 the RPN low bits plus flags. */
+       lwz     r11, 0(r12)                     /* Get MS word of PTE */
+       lwz     r12, 4(r12)                     /* Get LS word of PTE */
+       rlwimi  r11, r12, 0, 0 , 19             /* Insert RPN */
+       tlbwe   r11, r13, PPC44x_TLB_XLAT       /* Write XLAT */
+
+       /*
+        * Create PAGEID. This is the faulting address,
+        * page size, and valid flag.
+        */
+       li      r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
+       rlwimi  r10, r11, 0, 20, 31             /* Insert valid and page size */
+       tlbwe   r10, r13, PPC44x_TLB_PAGEID     /* Write PAGEID */
+
+       li      r10, PPC44x_TLB_SR@l            /* Set SR */
+       rlwimi  r10, r12, 0, 30, 30             /* Set SW = _PAGE_RW */
+       rlwimi  r10, r12, 29, 29, 29            /* SX = _PAGE_HWEXEC */
+       rlwimi  r10, r12, 29, 28, 28            /* UR = _PAGE_USER */
+       rlwimi  r11, r12, 31, 26, 26            /* (_PAGE_USER>>1)->r12 */
+       and     r11, r12, r11                   /* HWEXEC & USER */
+       rlwimi  r10, r11, 0, 26, 26             /* UX = HWEXEC & USER */
+
+       rlwimi  r12, r10, 0, 26, 31             /* Insert static perms */
+       rlwinm  r12, r12, 0, 20, 15             /* Clear U0-U3 */
+       tlbwe   r12, r13, PPC44x_TLB_ATTRIB     /* Write ATTRIB */
+
+       /* Done...restore registers and get out of here.
+       */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       rfi                                     /* Force context change */
+
+/*
+ * Global functions
+ */
+
+/*
+ * extern void giveup_altivec(struct task_struct *prev)
+ *
+ * The 44x core does not have an AltiVec unit.
+ * NOTE(review): presumably a no-op stub so common code can call it
+ * unconditionally -- verify against callers.
+ */
+_GLOBAL(giveup_altivec)
+       blr
+
+/*
+ * extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The 44x core does not have an FPU.
+ * Only a stub when there is no FPU emulation (CONFIG_PPC_FPU unset);
+ * otherwise the real giveup_fpu is provided elsewhere.
+ */
+#ifndef CONFIG_PPC_FPU
+_GLOBAL(giveup_fpu)
+       blr
+#endif
+
+/*
+ * extern void abort(void)
+ *
+ * At present, this routine just applies a system reset.
+ * Note there is no blr: if the DBCR0 reset write does not take
+ * effect immediately, execution falls through into set_context.
+ */
+_GLOBAL(abort)
+        mfspr   r13,SPRN_DBCR0
+        oris    r13,r13,DBCR0_RST_SYSTEM@h
+        mtspr   SPRN_DBCR0,r13
+
+/*
+ * extern void set_context(unsigned long id, pgd_t *pgd)
+ *
+ * Switch the address-space context by loading the new context id
+ * (r3) into the PID register.
+ */
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Context switch the PTE pointer for the Abatron BDI2000.
+        * The PGDIR is the second parameter.
+        */
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       stw     r4, 0x4(r5)
+#endif
+       mtspr   SPRN_PID,r3
+       isync                   /* Force context change */
+       blr
+
+/*
+ * We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+       .data
+       .align  12              /* 2^12 = 4096-byte (page) alignment */
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  4096
+
+/*
+ * To support >32-bit physical addresses, we use an 8KB pgdir.
+ */
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  8192
+
+/* Reserved 4k for the critical exception stack & 4k for the machine
+ * check stack per CPU for kernel mode exceptions */
+       .section .bss
+        .align 12
+exception_stack_bottom:
+       .space  BOOKE_EXCEPTION_STACK_SIZE
+       .globl  exception_stack_top
+exception_stack_top:
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+       .globl  cmd_line
+cmd_line:
+       .space  512
+
+/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+       .space  8
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S
new file mode 100644 (file)
index 0000000..10c261c
--- /dev/null
@@ -0,0 +1,1022 @@
+/*
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *     PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ *     Author: MontaVista Software, Inc.
+ *             frank_rowand@mvista.com or source@mvista.com
+ *             debbie_chu@mvista.com
+ *
+ *
+ *    Module name: head_4xx.S
+ *
+ *    Description:
+ *      Kernel execution entry point code.
+ *
+ *    This program is free software; you can redistribute it and/or
+ *    modify it under the terms of the GNU General Public License
+ *    as published by the Free Software Foundation; either version
+ *    2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/ibm4xx.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* As with the other PowerPC ports, it is expected that when code
+ * execution begins here, the following registers contain valid, yet
+ * optional, information:
+ *
+ *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+ *   r4 - Starting address of the init RAM disk
+ *   r5 - Ending address of the init RAM disk
+ *   r6 - Start of kernel command line string (e.g. "mem=96m")
+ *   r7 - End of kernel command line string
+ *
+ * This is all going to change RSN when we add bi_recs.......  -- Dan
+ */
+       .text
+_GLOBAL(_stext)
+_GLOBAL(_start)
+
+       /* Save parameters we are passed.
+        * Boot parameters arrive in r3-r7 (see the comment above); park
+        * them in non-volatile r27-r31 so they survive the calls below.
+       */
+       mr      r31,r3
+       mr      r30,r4
+       mr      r29,r5
+       mr      r28,r6
+       mr      r27,r7
+
+       /* We have to turn on the MMU right away so we get cache modes
+        * set correctly.
+        */
+       bl      initial_mmu
+
+/* We now have the lower 16 Meg mapped into TLB entries, and the caches
+ * ready to work.
+ */
+turn_on_mmu:
+       /* rfi with SRR1 = MSR_KERNEL, SRR0 = start_here: enables
+        * translation and continues at start_here in one step. */
+       lis     r0,MSR_KERNEL@h
+       ori     r0,r0,MSR_KERNEL@l
+       mtspr   SPRN_SRR1,r0
+       lis     r0,start_here@h
+       ori     r0,r0,start_here@l
+       mtspr   SPRN_SRR0,r0
+       SYNC
+       rfi                             /* enables MMU */
+       b       .                       /* prevent prefetch past rfi */
+
+/*
+ * This area is used for temporarily saving registers during the
+ * critical exception prolog.
+ * Pinned at physical 0xc0 so CRITICAL_EXCEPTION_PROLOG can reach it
+ * with a zero base register (stw rN,crit_rN@l(0)).
+ */
+       . = 0xc0
+crit_save:
+_GLOBAL(crit_r10)
+       .space  4
+_GLOBAL(crit_r11)
+       .space  4
+
+/*
+ * Exception vector entry code. This code runs with address translation
+ * turned off (i.e. using physical addresses). We assume SPRG3 has the
+ * physical address of the current task thread_struct.
+ * Note that we have to have decremented r1 before we write to any fields
+ * of the exception frame, since a critical interrupt could occur at any
+ * time, and it will write to the area immediately below the current r1.
+ *
+ * On exit: r11 = physical pointer to the exception frame; the
+ * pre-exception r1 is stored both in the frame's GPR1 slot and in the
+ * back-chain word at offset 0; r9 = SRR1 with MSR_WE cleared;
+ * r12 = SRR0; CR, LR, r0, r3-r12 are saved in the frame.
+ * GPRs are scratched via SPRG0/1/2.
+ */
+#define NORMAL_EXCEPTION_PROLOG                                                     \
+       mtspr   SPRN_SPRG0,r10;         /* save two registers to work with */\
+       mtspr   SPRN_SPRG1,r11;                                              \
+       mtspr   SPRN_SPRG2,r1;                                               \
+       mfcr    r10;                    /* save CR in r10 for now          */\
+       mfspr   r11,SPRN_SRR1;          /* check whether user or kernel    */\
+       andi.   r11,r11,MSR_PR;                                              \
+       beq     1f;                                                          \
+       mfspr   r1,SPRN_SPRG3;          /* if from user, start at top of   */\
+       lwz     r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack   */\
+       addi    r1,r1,THREAD_SIZE;                                           \
+1:     subi    r1,r1,INT_FRAME_SIZE;   /* Allocate an exception frame     */\
+       tophys(r11,r1);                                                      \
+       stw     r10,_CCR(r11);          /* save various registers          */\
+       stw     r12,GPR12(r11);                                              \
+       stw     r9,GPR9(r11);                                                \
+       mfspr   r10,SPRN_SPRG0;                                              \
+       stw     r10,GPR10(r11);                                              \
+       mfspr   r12,SPRN_SPRG1;                                              \
+       stw     r12,GPR11(r11);                                              \
+       mflr    r10;                                                         \
+       stw     r10,_LINK(r11);                                              \
+       mfspr   r10,SPRN_SPRG2;                                              \
+       mfspr   r12,SPRN_SRR0;                                               \
+       stw     r10,GPR1(r11);                                               \
+       mfspr   r9,SPRN_SRR1;                                                \
+       stw     r10,0(r11);                                                  \
+       rlwinm  r9,r9,0,14,12;          /* clear MSR_WE (necessary?)       */\
+       stw     r0,GPR0(r11);                                                \
+       SAVE_4GPRS(3, r11);                                                  \
+       SAVE_2GPRS(7, r11)
+
+/*
+ * Exception prolog for critical exceptions.  This is a little different
+ * from the normal exception prolog above since a critical exception
+ * can potentially occur at any point during normal exception processing.
+ * Thus we cannot use the same SPRG registers as the normal prolog above.
+ * Instead we use a couple of words of memory at low physical addresses.
+ * This is OK since we don't support SMP on these processors.
+ *
+ * Stack selection: if the exception came from kernel mode (SRR3.PR
+ * clear) the frame is built on the dedicated critical_stack_top;
+ * if from user mode, on the task's kernel stack.  SRR2/SRR3 hold the
+ * critical-class return state in place of SRR0/SRR1.
+ */
+#define CRITICAL_EXCEPTION_PROLOG                                           \
+       stw     r10,crit_r10@l(0);      /* save two registers to work with */\
+       stw     r11,crit_r11@l(0);                                           \
+       mfcr    r10;                    /* save CR in r10 for now          */\
+       mfspr   r11,SPRN_SRR3;          /* check whether user or kernel    */\
+       andi.   r11,r11,MSR_PR;                                              \
+       lis     r11,critical_stack_top@h;                                    \
+       ori     r11,r11,critical_stack_top@l;                                \
+       beq     1f;                                                          \
+       /* COMING FROM USER MODE */                                          \
+       mfspr   r11,SPRN_SPRG3;         /* if from user, start at top of   */\
+       lwz     r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
+       addi    r11,r11,THREAD_SIZE;                                         \
+1:     subi    r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame     */\
+       tophys(r11,r11);                                                     \
+       stw     r10,_CCR(r11);          /* save various registers          */\
+       stw     r12,GPR12(r11);                                              \
+       stw     r9,GPR9(r11);                                                \
+       mflr    r10;                                                         \
+       stw     r10,_LINK(r11);                                              \
+       mfspr   r12,SPRN_DEAR;          /* save DEAR and ESR in the frame  */\
+       stw     r12,_DEAR(r11);         /* since they may have had stuff   */\
+       mfspr   r9,SPRN_ESR;            /* in them at the point where the  */\
+       stw     r9,_ESR(r11);           /* exception was taken             */\
+       mfspr   r12,SPRN_SRR2;                                               \
+       stw     r1,GPR1(r11);                                                \
+       mfspr   r9,SPRN_SRR3;                                                \
+       stw     r1,0(r11);                                                   \
+       tovirt(r1,r11);                                                      \
+       rlwinm  r9,r9,0,14,12;          /* clear MSR_WE (necessary?)       */\
+       stw     r0,GPR0(r11);                                                \
+       SAVE_4GPRS(3, r11);                                                  \
+       SAVE_2GPRS(7, r11)
+
+       /*
+        * State at this point:
+        * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
+        * r10 saved in crit_r10 and in stack frame, trashed
+        * r11 saved in crit_r11 and in stack frame,
+        *      now phys stack/exception frame pointer
+        * r12 saved in stack frame, now saved SRR2
+        * CR saved in stack frame, CR0.EQ = !SRR3.PR
+        * LR, DEAR, ESR in stack frame
+        * r1 saved in stack frame, now virt stack/excframe pointer
+        * r0, r3-r8 saved in stack frame
+        */
+
+/*
+ * Exception vectors.
+ * START_EXCEPTION places a label at the architected vector offset n
+ * via the location counter; EXCEPTION/CRITICAL_EXCEPTION expand to a
+ * full vector body (prolog + transfer to a C handler).
+ */
+#define        START_EXCEPTION(n, label)                                            \
+       . = n;                                                               \
+label:
+
+#define EXCEPTION(n, label, hdlr, xfer)                                \
+       START_EXCEPTION(n, label);                              \
+       NORMAL_EXCEPTION_PROLOG;                                \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;                     \
+       xfer(n, hdlr)
+
+#define CRITICAL_EXCEPTION(n, label, hdlr)                     \
+       START_EXCEPTION(n, label);                              \
+       CRITICAL_EXCEPTION_PROLOG;                              \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;                     \
+       EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+                         NOCOPY, crit_transfer_to_handler,     \
+                         ret_from_crit_exc)
+
+/* Store the trap number, build the handler MSR (optionally copying EE
+ * from the interrupted context via copyee), then branch to the
+ * transfer routine which dispatches to hdlr and returns through ret.
+ * NOTE(review): the n+1/n+2 added to the trap number appears to encode
+ * the transfer flavour in the low bits -- confirm against the TRAP
+ * field consumers in traps.c/entry_32.
+ */
+#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)  \
+       li      r10,trap;                                       \
+       stw     r10,TRAP(r11);                                  \
+       lis     r10,msr@h;                                      \
+       ori     r10,r10,msr@l;                                  \
+       copyee(r10, r9);                                        \
+       bl      tfer;                                           \
+       .long   hdlr;                                           \
+       .long   ret
+
+#define COPY_EE(d, s)          rlwimi d,s,0,16,16
+#define NOCOPY(d, s)
+
+#define EXC_XFER_STD(n, hdlr)          \
+       EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
+                         ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr)         \
+       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
+                         ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)           \
+       EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
+                         ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)      \
+       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
+                         ret_from_except)
+
+
+/*
+ * 0x0100 - Critical Interrupt Exception
+ * (Vectors below sit at fixed offsets via the `. = n` in
+ * START_EXCEPTION.)
+ */
+       CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
+
+/*
+ * 0x0200 - Machine Check Exception
+ */
+       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+
+/*
+ * 0x0300 - Data Storage Exception
+ * This happens for just a few reasons.  U0 set (but we don't do that),
+ * or zone protection fault (user violation, write to protected page).
+ * If this is just an update of modified status, we do that quickly
+ * and exit.  Otherwise, we call heavyweight functions to do the work.
+ */
+       START_EXCEPTION(0x0300, DataStorage)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+#ifdef CONFIG_403GCX
+       /* 403GCX: no SPRG4-7, so stash scratch state at physical
+        * addresses 0x0-0xc (r0 as the base operand reads as zero). */
+       stw     r12, 0(r0)
+       stw     r9, 4(r0)
+       mfcr    r11
+       mfspr   r12, SPRN_PID
+       stw     r11, 8(r0)
+       stw     r12, 12(r0)
+#else
+       mtspr   SPRN_SPRG4, r12
+       mtspr   SPRN_SPRG5, r9
+       mfcr    r11
+       mfspr   r12, SPRN_PID
+       mtspr   SPRN_SPRG7, r11
+       mtspr   SPRN_SPRG6, r12
+#endif
+
+       /* First, check if it was a zone fault (which means a user
+       * tried to access a kernel or read-protected page - always
+       * a SEGV).  All other faults here must be stores, so no
+       * need to check ESR_DST as well. */
+       mfspr   r10, SPRN_ESR
+       andis.  r10, r10, ESR_DIZ@h
+       bne     2f
+
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       cmplw   r10, r11
+       blt+    3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+       li      r9, 0
+       mtspr   SPRN_PID, r9            /* TLB will have 0 TID */
+       b       4f
+
+       /* Get the PGD for the current thread.
+        */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+4:
+       tophys(r11, r11)
+       rlwimi  r11, r10, 12, 20, 29    /* Create L1 (pgdir/pmd) address */
+       lwz     r11, 0(r11)             /* Get L1 entry */
+       rlwinm. r12, r11, 0, 0, 19      /* Extract L2 (pte) base address */
+       beq     2f                      /* Bail if no table */
+
+       rlwimi  r12, r10, 22, 20, 29    /* Compute PTE address */
+       lwz     r11, 0(r12)             /* Get Linux PTE */
+
+       andi.   r9, r11, _PAGE_RW       /* Is it writeable? */
+       beq     2f                      /* Bail if not */
+
+       /* Update 'changed'.
+       */
+       ori     r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+       stw     r11, 0(r12)             /* Update Linux page table */
+
+       /* Most of the Linux PTE is ready to load into the TLB LO.
+        * We set ZSEL, where only the LS-bit determines user access.
+        * We set execute, because we don't have the granularity to
+        * properly set this at the page level (Linux problem).
+        * If shared is set, we cause a zero PID->TID load.
+        * Many of these bits are software only.  Bits we don't set
+        * here we (properly should) assume have the appropriate value.
+        */
+       li      r12, 0x0ce2             /* mask of software-only PTE bits */
+       andc    r11, r11, r12           /* Make sure 20, 21 are zero */
+
+       /* find the TLB index that caused the fault.  It has to be here.
+       */
+       tlbsx   r9, 0, r10
+
+       tlbwe   r11, r9, TLB_DATA               /* Load TLB LO */
+
+       /* Done...restore registers and get out of here.
+       */
+#ifdef CONFIG_403GCX
+       lwz     r12, 12(r0)
+       lwz     r11, 8(r0)
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       lwz     r9, 4(r0)
+       lwz     r12, 0(r0)
+#else
+       mfspr   r12, SPRN_SPRG6
+       mfspr   r11, SPRN_SPRG7
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       mfspr   r9, SPRN_SPRG5
+       mfspr   r12, SPRN_SPRG4
+#endif
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       PPC405_ERR77_SYNC
+       rfi                     /* Should sync shadow TLBs */
+       b       .               /* prevent prefetch past rfi */
+
+2:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+#ifdef CONFIG_403GCX
+       lwz     r12, 12(r0)
+       lwz     r11, 8(r0)
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       lwz     r9, 4(r0)
+       lwz     r12, 0(r0)
+#else
+       mfspr   r12, SPRN_SPRG6
+       mfspr   r11, SPRN_SPRG7
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       mfspr   r9, SPRN_SPRG5
+       mfspr   r12, SPRN_SPRG4
+#endif
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       DataAccess
+
+/*
+ * 0x0400 - Instruction Storage Exception
+ * This is caused by a fetch from non-execute or guarded pages.
+ */
+       START_EXCEPTION(0x0400, InstructionAccess)
+       NORMAL_EXCEPTION_PROLOG
+       mr      r4,r12                  /* Pass SRR0 as arg2 */
+       li      r5,0                    /* Pass zero as arg3 */
+       EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+/* 0x0500 - External Interrupt Exception */
+       EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* 0x0600 - Alignment Exception */
+       START_EXCEPTION(0x0600, Alignment)
+       NORMAL_EXCEPTION_PROLOG
+       mfspr   r4,SPRN_DEAR            /* Grab the DEAR and save it */
+       stw     r4,_DEAR(r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_EE(0x600, alignment_exception)
+
+/* 0x0700 - Program Exception */
+       START_EXCEPTION(0x0700, ProgramCheck)
+       NORMAL_EXCEPTION_PROLOG
+       mfspr   r4,SPRN_ESR             /* Grab the ESR and save it */
+       stw     r4,_ESR(r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_STD(0x700, program_check_exception)
+
+/* 0x0800 - 0x0B00 - vectors with no 4xx use; report as unknown */
+       EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
+
+/* 0x0C00 - System Call Exception */
+       START_EXCEPTION(0x0C00, SystemCall)
+       NORMAL_EXCEPTION_PROLOG
+       EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+       EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
+
+/* 0x1000 - Programmable Interval Timer (PIT) Exception
+ * The PIT is the 4xx equivalent of the decrementer; ack it in the TSR
+ * before transferring to the generic timer handler.
+ */
+       START_EXCEPTION(0x1000, Decrementer)
+       NORMAL_EXCEPTION_PROLOG
+       lis     r0,TSR_PIS@h
+       mtspr   SPRN_TSR,r0             /* Clear the PIT exception */
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_LITE(0x1000, timer_interrupt)
+
+#if 0
+/* NOTE:
+ * FIT and WDT handlers are not implemented yet.
+ */
+
+/* 0x1010 - Fixed Interval Timer (FIT) Exception
+*/
+       STND_EXCEPTION(0x1010,  FITException,           unknown_exception)
+
+/* 0x1020 - Watchdog Timer (WDT) Exception
+*/
+#ifdef CONFIG_BOOKE_WDT
+       CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
+#else
+       CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
+#endif
+#endif
+
+/* 0x1100 - Data TLB Miss Exception
+ * As the name implies, translation is not in the MMU, so search the
+ * page tables and fix it.  The only purpose of this function is to
+ * load TLB entries from the page table if they exist.
+ *
+ * Runs with the MMU state of the interrupted context; only r10-r12, r9,
+ * CR and PID are touched, and all are restored before rfi/bailout.
+ */
+       START_EXCEPTION(0x1100, DTLBMiss)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+#ifdef CONFIG_403GCX
+       /* Spill r12/r9/CR/PID to low memory (r0 base) instead of SPRGs.
+        * NOTE(review): presumably the 403GCX lacks the SPRG4-7 scratch
+        * registers used in the #else path - confirm against the 403 manual.
+        */
+       stw     r12, 0(r0)
+       stw     r9, 4(r0)
+       mfcr    r11
+       mfspr   r12, SPRN_PID
+       stw     r11, 8(r0)
+       stw     r12, 12(r0)
+#else
+       mtspr   SPRN_SPRG4, r12
+       mtspr   SPRN_SPRG5, r9
+       mfcr    r11
+       mfspr   r12, SPRN_PID
+       mtspr   SPRN_SPRG7, r11
+       mtspr   SPRN_SPRG6, r12
+#endif
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       cmplw   r10, r11
+       blt+    3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+       li      r9, 0
+       mtspr   SPRN_PID, r9            /* TLB will have 0 TID */
+       b       4f
+
+       /* Get the PGD for the current thread.
+        */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+4:
+       tophys(r11, r11)
+       rlwimi  r11, r10, 12, 20, 29    /* Create L1 (pgdir/pmd) address */
+       lwz     r12, 0(r11)             /* Get L1 entry */
+       andi.   r9, r12, _PMD_PRESENT   /* Check if it points to a PTE page */
+       beq     2f                      /* Bail if no table */
+
+       rlwimi  r12, r10, 22, 20, 29    /* Compute PTE address */
+       lwz     r11, 0(r12)             /* Get Linux PTE */
+       andi.   r9, r11, _PAGE_PRESENT
+       beq     5f
+
+       /* Mark the PTE referenced before loading it into the TLB. */
+       ori     r11, r11, _PAGE_ACCESSED
+       stw     r11, 0(r12)
+
+       /* Create TLB tag.  This is the faulting address plus a static
+        * set of bits.  These are size, valid, E, U0.
+       */
+       li      r12, 0x00c0
+       rlwimi  r10, r12, 0, 20, 31
+
+       b       finish_tlb_load
+
+2:     /* Check for possible large-page pmd entry: extract the size
+        * bits from the PMD itself.
+        */
+       rlwinm. r9, r12, 2, 22, 24
+       beq     5f
+
+       /* Create TLB tag.  This is the faulting address, plus a static
+        * set of bits (valid, E, U0) plus the size from the PMD.
+        */
+       ori     r9, r9, 0x40
+       rlwimi  r10, r9, 0, 20, 31
+       mr      r11, r12
+
+       b       finish_tlb_load
+
+5:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+#ifdef CONFIG_403GCX
+       lwz     r12, 12(r0)
+       lwz     r11, 8(r0)
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       lwz     r9, 4(r0)
+       lwz     r12, 0(r0)
+#else
+       mfspr   r12, SPRN_SPRG6
+       mfspr   r11, SPRN_SPRG7
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       mfspr   r9, SPRN_SPRG5
+       mfspr   r12, SPRN_SPRG4
+#endif
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       DataAccess
+
+/* 0x1200 - Instruction TLB Miss Exception
+ * Nearly the same as above, except we get our information from different
+ * registers and bailout to a different point.
+ * (Faulting address comes from SRR0 rather than DEAR, and the bailout
+ * goes to InstructionAccess rather than DataAccess.)
+ */
+       START_EXCEPTION(0x1200, ITLBMiss)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+#ifdef CONFIG_403GCX
+       /* 403GCX: spill working regs/CR/PID to low memory (see DTLBMiss) */
+       stw     r12, 0(r0)
+       stw     r9, 4(r0)
+       mfcr    r11
+       mfspr   r12, SPRN_PID
+       stw     r11, 8(r0)
+       stw     r12, 12(r0)
+#else
+       mtspr   SPRN_SPRG4, r12
+       mtspr   SPRN_SPRG5, r9
+       mfcr    r11
+       mfspr   r12, SPRN_PID
+       mtspr   SPRN_SPRG7, r11
+       mtspr   SPRN_SPRG6, r12
+#endif
+       mfspr   r10, SPRN_SRR0          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       cmplw   r10, r11
+       blt+    3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+       li      r9, 0
+       mtspr   SPRN_PID, r9            /* TLB will have 0 TID */
+       b       4f
+
+       /* Get the PGD for the current thread.
+        */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+4:
+       tophys(r11, r11)
+       rlwimi  r11, r10, 12, 20, 29    /* Create L1 (pgdir/pmd) address */
+       lwz     r12, 0(r11)             /* Get L1 entry */
+       andi.   r9, r12, _PMD_PRESENT   /* Check if it points to a PTE page */
+       beq     2f                      /* Bail if no table */
+
+       rlwimi  r12, r10, 22, 20, 29    /* Compute PTE address */
+       lwz     r11, 0(r12)             /* Get Linux PTE */
+       andi.   r9, r11, _PAGE_PRESENT
+       beq     5f
+
+       /* Mark the PTE referenced before loading it into the TLB. */
+       ori     r11, r11, _PAGE_ACCESSED
+       stw     r11, 0(r12)
+
+       /* Create TLB tag.  This is the faulting address plus a static
+        * set of bits.  These are size, valid, E, U0.
+       */
+       li      r12, 0x00c0
+       rlwimi  r10, r12, 0, 20, 31
+
+       b       finish_tlb_load
+
+2:     /* Check for possible large-page pmd entry */
+       rlwinm. r9, r12, 2, 22, 24
+       beq     5f
+
+       /* Create TLB tag.  This is the faulting address, plus a static
+        * set of bits (valid, E, U0) plus the size from the PMD.
+        */
+       ori     r9, r9, 0x40
+       rlwimi  r10, r9, 0, 20, 31
+       mr      r11, r12
+
+       b       finish_tlb_load
+
+5:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+#ifdef CONFIG_403GCX
+       lwz     r12, 12(r0)
+       lwz     r11, 8(r0)
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       lwz     r9, 4(r0)
+       lwz     r12, 0(r0)
+#else
+       mfspr   r12, SPRN_SPRG6
+       mfspr   r11, SPRN_SPRG7
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       mfspr   r9, SPRN_SPRG5
+       mfspr   r12, SPRN_SPRG4
+#endif
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       InstructionAccess
+
+/* 0x1300 - 0x1F00 - vectors with no 4xx use (except the 0x1700 errata
+ * workaround below); report as unknown exceptions.
+ */
+       EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+#ifdef CONFIG_IBM405_ERR51
+       /* 405GP errata 51: treat a 0x1700 exception as a data TLB miss */
+       START_EXCEPTION(0x1700, Trap_17)
+       b DTLBMiss
+#else
+       EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
+#endif
+       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
+
+/* Check for a single step debug exception while in an exception
+ * handler before state has been saved.  This is to catch the case
+ * where an instruction that we are trying to single step causes
+ * an exception (eg ITLB/DTLB miss) and thus the first instruction of
+ * the exception handler generates a single step debug exception.
+ *
+ * If we get a debug trap on the first instruction of an exception handler,
+ * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
+ * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
+ * The exception handler was handling a non-critical interrupt, so it will
+ * save (and later restore) the MSR via SPRN_SRR1, which will still have
+ * the MSR_DE bit set.
+ */
+       /* 0x2000 - Debug Exception */
+       START_EXCEPTION(0x2000, DebugTrap)
+       CRITICAL_EXCEPTION_PROLOG
+
+       /*
+        * If this is a single step or branch-taken exception in an
+        * exception entry sequence, it was probably meant to apply to
+        * the code where the exception occurred (since exception entry
+        * doesn't turn off DE automatically).  We simulate the effect
+        * of turning off DE on entry to an exception handler by turning
+        * off DE in the SRR3 value and clearing the debug status.
+        */
+       mfspr   r10,SPRN_DBSR           /* check single-step/branch taken */
+       andis.  r10,r10,DBSR_IC@h
+       beq+    2f
+
+       andi.   r10,r9,MSR_IR|MSR_PR    /* check supervisor + MMU off */
+       beq     1f                      /* branch and fix it up */
+
+       mfspr   r10,SPRN_SRR2           /* Faulting instruction address */
+       cmplwi  r10,0x2100
+       bgt+    2f                      /* address above exception vectors */
+
+       /* here it looks like we got an inappropriate debug exception. */
+1:     rlwinm  r9,r9,0,~MSR_DE         /* clear DE in the SRR3 value */
+       lis     r10,DBSR_IC@h           /* clear the IC event */
+       mtspr   SPRN_DBSR,r10
+       /* restore state and get out */
+       lwz     r10,_CCR(r11)
+       lwz     r0,GPR0(r11)
+       lwz     r1,GPR1(r11)
+       mtcrf   0x80,r10
+       mtspr   SPRN_SRR2,r12
+       mtspr   SPRN_SRR3,r9
+       lwz     r9,GPR9(r11)
+       lwz     r12,GPR12(r11)
+       /* crit_r10/crit_r11 are low-memory save slots; NOTE(review):
+        * presumably filled by CRITICAL_EXCEPTION_PROLOG (defined
+        * elsewhere in this file) - confirm.
+        */
+       lwz     r10,crit_r10@l(0)
+       lwz     r11,crit_r11@l(0)
+       PPC405_ERR77_SYNC
+       rfci
+       b       .
+
+       /* continue normal handling for a critical exception... */
+2:     mfspr   r4,SPRN_DBSR
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_TEMPLATE(DebugException, 0x2002, \
+               (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+               NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
+
+/*
+ * The other Data TLB exceptions bail out to this point
+ * if they can't resolve the lightweight TLB fault.
+ * The miss handlers have already restored all working registers
+ * and the PID before branching here, so we can take the normal
+ * (non-critical) page-fault path.
+ */
+DataAccess:
+       NORMAL_EXCEPTION_PROLOG
+       mfspr   r5,SPRN_ESR             /* Grab the ESR, save it, pass arg3 */
+       stw     r5,_ESR(r11)
+       mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
+       EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+/* Other PowerPC processors, namely those derived from the 6xx-series
+ * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
+ * However, for the 4xx-series processors these are neither defined nor
+ * reserved.
+ */
+
+       /* Damn, I came up one instruction too many to fit into the
+        * exception space :-).  Both the instruction and data TLB
+        * miss get to this point to load the TLB.
+        *      r10 - TLB_TAG value
+        *      r11 - Linux PTE
+        *      r12, r9 - available to use
+        *      PID - loaded with proper value when we get here
+        *      Upon exit, we reload everything and RFI.
+        * Actually, it will fit now, but oh well.....a common place
+        * to load the TLB.
+        */
+tlb_4xx_index:
+       .long   0
+finish_tlb_load:
+       /* load the next available TLB index (simple round-robin
+        * replacement over the PPC4XX_TLB_SIZE entries).
+       */
+       lwz     r9, tlb_4xx_index@l(0)
+       addi    r9, r9, 1
+       andi.   r9, r9, (PPC4XX_TLB_SIZE-1)
+       stw     r9, tlb_4xx_index@l(0)
+
+6:
+       /* NOTE(review): label 6: has no visible referrer in this chunk -
+        * confirm whether it is dead or branched to from elsewhere.
+        *
+        * Clear out the software-only bits in the PTE to generate the
+        * TLB_DATA value.  These are the bottom 2 bits of the RPN, the
+        * top 3 bits of the zone field, and M.
+        */
+       li      r12, 0x0ce2
+       andc    r11, r11, r12
+
+       tlbwe   r11, r9, TLB_DATA               /* Load TLB LO */
+       tlbwe   r10, r9, TLB_TAG                /* Load TLB HI */
+
+       /* Done...restore registers and get out of here.
+       */
+#ifdef CONFIG_403GCX
+       lwz     r12, 12(r0)
+       lwz     r11, 8(r0)
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       lwz     r9, 4(r0)
+       lwz     r12, 0(r0)
+#else
+       mfspr   r12, SPRN_SPRG6
+       mfspr   r11, SPRN_SPRG7
+       mtspr   SPRN_PID, r12
+       mtcr    r11
+       mfspr   r9, SPRN_SPRG5
+       mfspr   r12, SPRN_SPRG4
+#endif
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       PPC405_ERR77_SYNC
+       rfi                     /* Should sync shadow TLBs */
+       b       .               /* prevent prefetch past rfi */
+
+/* extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The PowerPC 4xx family of processors do not have an FPU, so this just
+ * returns.  (Presumably kept as a stub so generic code can call
+ * giveup_fpu() unconditionally - confirm callers.)
+ */
+_GLOBAL(giveup_fpu)
+       blr
+
+/* This is where the main kernel code starts.
+ * Entered with the boot-time 1:1 TLB mapping from initial_mmu still in
+ * place; sets up current/thread/stack, runs early_init/machine_init/
+ * MMU_init, then switches to the real kernel MMU context and jumps to
+ * start_kernel.
+ */
+start_here:
+
+       /* ptr to current */
+       lis     r2,init_task@h
+       ori     r2,r2,init_task@l
+
+       /* ptr to phys current thread */
+       tophys(r4,r2)
+       addi    r4,r4,THREAD    /* init task's THREAD */
+       mtspr   SPRN_SPRG3,r4
+
+       /* stack */
+       lis     r1,init_thread_union@ha
+       addi    r1,r1,init_thread_union@l
+       li      r0,0
+       stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+       bl      early_init      /* We have to do this with MMU on */
+
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ * r27-r31 hold the values passed in by the boot loader.
+ */
+       mr      r3,r31
+       mr      r4,r30
+       mr      r5,r29
+       mr      r6,r28
+       mr      r7,r27
+       bl      machine_init
+       bl      MMU_init
+
+/* Go back to running unmapped so we can load up new values
+ * and change to using our exception vectors.
+ * On the 4xx, all we have to do is invalidate the TLB to clear
+ * the old 16M byte TLB mappings.
+ */
+       lis     r4,2f@h
+       ori     r4,r4,2f@l
+       tophys(r4,r4)
+       lis     r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
+       ori     r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       rfi
+       b       .               /* prevent prefetch past rfi */
+
+/* Load up the kernel context */
+2:
+       sync                    /* Flush to memory before changing TLB */
+       tlbia
+       isync                   /* Flush shadow TLBs */
+
+       /* set up the PTE pointers for the Abatron bdiGDB.
+       */
+       lis     r6, swapper_pg_dir@h
+       ori     r6, r6, swapper_pg_dir@l
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       stw     r5, 0xf0(r0)    /* Must match your Abatron config file */
+       tophys(r5,r5)
+       stw     r6, 0(r5)
+
+/* Now turn on the MMU for real! */
+       lis     r4,MSR_KERNEL@h
+       ori     r4,r4,MSR_KERNEL@l
+       lis     r3,start_kernel@h
+       ori     r3,r3,start_kernel@l
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       rfi                     /* enable MMU and jump to start_kernel */
+       b       .               /* prevent prefetch past rfi */
+
+/* Set up the initial MMU state so we can do the first level of
+ * kernel initialization.  This maps the first 16 MBytes of memory 1:1
+ * virtual to physical and more importantly sets the cache mode.
+ * Clobbers r0, r3, r4 (and r5 in the serial-debug case).
+ */
+initial_mmu:
+       tlbia                   /* Invalidate all TLB entries */
+       isync
+
+       /* We should still be executing code at physical address 0x0000xxxx
+        * at this point. However, start_here is at virtual address
+        * 0xC000xxxx. So, set up a TLB mapping to cover this once
+        * translation is enabled.
+        */
+
+       lis     r3,KERNELBASE@h         /* Load the kernel virtual address */
+       ori     r3,r3,KERNELBASE@l
+       tophys(r4,r3)                   /* Load the kernel physical address */
+
+       iccci   r0,r3                   /* Invalidate the i-cache before use */
+
+       /* Load the kernel PID.
+       */
+       li      r0,0
+       mtspr   SPRN_PID,r0
+       sync
+
+       /* Configure and load two entries into TLB slots 62 and 63.
+        * In case we are pinning TLBs, these are reserved by the
+        * other TLB functions.  If not reserving, then it doesn't
+        * matter where they are loaded.
+        * NOTE(review): the comment mentions two entries (62 and 63)
+        * but only slot 63 is written below - confirm intent.
+        */
+       clrrwi  r4,r4,10                /* Mask off the real page number */
+       ori     r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
+
+       clrrwi  r3,r3,10                /* Mask off the effective page number */
+       ori     r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+
+        li      r0,63                    /* TLB slot 63 */
+
+       tlbwe   r4,r0,TLB_DATA          /* Load the data portion of the entry */
+       tlbwe   r3,r0,TLB_TAG           /* Load the tag portion of the entry */
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
+
+       /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
+        * the UARTs nice and early.  We use a 4k real==virtual mapping. */
+
+       lis     r3,SERIAL_DEBUG_IO_BASE@h
+       ori     r3,r3,SERIAL_DEBUG_IO_BASE@l
+       mr      r4,r3
+       clrrwi  r4,r4,12
+       ori     r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
+
+       clrrwi  r3,r3,12
+       ori     r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+       li      r0,0                    /* TLB slot 0 */
+       tlbwe   r4,r0,TLB_DATA
+       tlbwe   r3,r0,TLB_TAG
+#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */
+
+       isync
+
+       /* Establish the exception vector base
+       */
+       lis     r4,KERNELBASE@h         /* EVPR only uses the high 16-bits */
+       tophys(r0,r4)                   /* Use the physical address */
+       mtspr   SPRN_EVPR,r0
+
+       blr
+
+/* abort() - force a board reset via the debug control register.
+ * NOTE(review): no blr - presumably DBCR0_RST_SYSTEM resets the chip
+ * immediately so this never returns (it would otherwise fall through
+ * into set_context); confirm against the 4xx DBCR0 documentation.
+ */
+_GLOBAL(abort)
+        mfspr   r13,SPRN_DBCR0
+        oris    r13,r13,DBCR0_RST_SYSTEM@h
+        mtspr   SPRN_DBCR0,r13
+
+/* set_context(pid, pgd) - switch MMU context.
+ * r3 = new PID value, r4 = new page directory (second parameter,
+ * only used for the Abatron debugger hook).
+ */
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Context switch the PTE pointer for the Abatron BDI2000.
+        * The PGDIR is the second parameter.
+        */
+       lis     r5, KERNELBASE@h
+       lwz     r5, 0xf0(r5)
+       stw     r4, 0x4(r5)
+#endif
+       sync
+       mtspr   SPRN_PID,r3
+       isync                           /* Need an isync to flush shadow */
+                                       /* TLBs after changing PID */
+       blr
+
+/* We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+       .data
+       .align  12
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  4096
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  4096
+
+
+/* Stack for handling critical exceptions from kernel mode.
+ * NOTE(review): critical_stack_top and exception_stack_top label the
+ * same address (nothing is reserved between them) - confirm whether a
+ * separate critical stack was intended here.
+ */
+       .section .bss
+        .align 12
+exception_stack_bottom:
+       .space  4096
+critical_stack_top:
+       .globl  exception_stack_top
+exception_stack_top:
+
+/* This space gets a copy of optional info passed to us by the bootstrap
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+       .globl  cmd_line
+cmd_line:
+       .space  512
+
+/* Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+       .space  8
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644 (file)
index 0000000..147215a
--- /dev/null
@@ -0,0 +1,1957 @@
+/*
+ *  arch/ppc64/kernel/head.S
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
+ *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
+ *
+ *  This file contains the low-level support and setup for the
+ *  PowerPC-64 platform, including trap and interrupt dispatch.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/systemcfg.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#include <asm/cputable.h>
+#include <asm/setup.h>
+#include <asm/hvcall.h>
+#include <asm/iSeries/LparMap.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#define DO_SOFT_DISABLE
+#endif
+
+/*
+ * We layout physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 -        : Early init and support code
+ */
+
+/*
+ *   SPRG Usage
+ *
+ *   Register  Definition
+ *
+ *   SPRG0     reserved for hypervisor
+ *   SPRG1     temp - used to save gpr
+ *   SPRG2     temp - used to save gpr
+ *   SPRG3     virt addr of paca
+ */
+
+/*
+ * Entering into this code we make the following assumptions:
+ *  For pSeries:
+ *   1. The MMU is off & open firmware is running in real mode.
+ *   2. The kernel is entered at __start
+ *
+ *  For iSeries:
+ *   1. The MMU is on (as it always is for iSeries)
+ *   2. The kernel is entered at system_reset_iSeries
+ */
+
+       .text
+       .globl  _stext
+_stext:
+#ifdef CONFIG_PPC_MULTIPLATFORM
+_GLOBAL(__start)
+       /* NOP this out unconditionally */
+BEGIN_FTR_SECTION
+       b       .__start_initialization_multiplatform
+END_FTR_SECTION(0, 1)
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+       /* Catch branch to 0 in real mode */
+       trap
+
+#ifdef CONFIG_PPC_ISERIES
+       /*
+        * At offset 0x20, there is a pointer to iSeries LPAR data.
+        * This is required by the hypervisor
+        */
+       . = 0x20
+       .llong hvReleaseData-KERNELBASE
+
+       /*
+        * At offset 0x28 and 0x30 are offsets to the mschunks_map
+        * array (used by the iSeries LPAR debugger to do translation
+        * between physical addresses and absolute addresses) and
+        * to the pidhash table (also used by the debugger)
+        */
+       .llong mschunks_map-KERNELBASE
+       .llong 0        /* pidhash-KERNELBASE SFRXXX */
+
+       /* Offset 0x38 - Pointer to start of embedded System.map */
+       .globl  embedded_sysmap_start
+embedded_sysmap_start:
+       .llong  0
+       /* Offset 0x40 - Pointer to end of embedded System.map */
+       .globl  embedded_sysmap_end
+embedded_sysmap_end:
+       .llong  0
+
+#endif /* CONFIG_PPC_ISERIES */
+
+       /* Secondary processors spin on this value until it goes to 1. */
+       .globl  __secondary_hold_spinloop
+__secondary_hold_spinloop:
+       .llong  0x0
+
+       /* Secondary processors write this value with their cpu # */
+       /* after they enter the spin loop immediately below.      */
+       .globl  __secondary_hold_acknowledge
+__secondary_hold_acknowledge:
+       .llong  0x0
+
+       . = 0x60
+/*
+ * The following code is used on pSeries to hold secondary processors
+ * in a spin loop after they have been freed from OpenFirmware, but
+ * before the bulk of the kernel has been relocated.  This code
+ * is relocated to physical address 0x60 before prom_init is run.
+ * All of it must fit below the first exception vector at 0x100.
+ *
+ * Entered with r3 = this cpu's linux cpu number; does not return
+ * until the master sets __secondary_hold_spinloop to 1.
+ */
+_GLOBAL(__secondary_hold)
+       mfmsr   r24
+       ori     r24,r24,MSR_RI
+       mtmsrd  r24                     /* RI on */
+
+       /* Grab our linux cpu number */
+       mr      r24,r3
+
+       /* Tell the master cpu we're here */
+       /* Relocation is off & we are located at an address less */
+       /* than 0x100, so only need to grab low order offset.    */
+       std     r24,__secondary_hold_acknowledge@l(0)
+       sync
+
+       /* All secondary cpus wait here until told to start. */
+100:   ld      r4,__secondary_hold_spinloop@l(0)
+       cmpdi   0,r4,1
+       bne     100b
+
+#ifdef CONFIG_HMT
+       b       .hmt_init
+#else
+#ifdef CONFIG_SMP
+       mr      r3,r24
+       b       .pSeries_secondary_smp_init
+#else
+       BUG_OPCODE                      /* no SMP, no HMT: should not be here */
+#endif
+#endif
+
+/* This value is used to mark exception frames on the stack.
+ * (The TC value spells "regshere" in ASCII.)
+ */
+       .section ".toc","aw"
+exception_marker:
+       .tc     ID_72656773_68657265[TC],0x7265677368657265
+       .text
+
+/*
+ * The following macros define the code that appears as
+ * the prologue to each of the exception handlers.  They
+ * are split into two parts to allow a single kernel binary
+ * to be used for pSeries and iSeries.
+ * LOL.  One day... - paulus
+ */
+
+/*
+ * We make as much of the exception code common between native
+ * exception handlers (including pSeries LPAR) and iSeries LPAR
+ * implementations as possible.
+ */
+
+/*
+ * This is the start of the interrupt handlers for pSeries
+ * This code runs with relocation off.
+ *
+ * EX_* are byte offsets into the per-exception save area in the paca.
+ * Note the deliberate overlaps: EX_SRR0/EX_R3 and EX_DAR/EX_LR share
+ * slots because SLB miss saves a different register set.
+ */
+#define EX_R9          0
+#define EX_R10         8
+#define EX_R11         16
+#define EX_R12         24
+#define EX_R13         32
+#define EX_SRR0                40
+#define EX_R3          40      /* SLB miss saves R3, but not SRR0 */
+#define EX_DAR         48
+#define EX_LR          48      /* SLB miss saves LR, but not DAR */
+#define EX_DSISR       56
+#define EX_CCR         60
+
+/* Save r9-r13 and CR into the given paca save area, then rfid to
+ * `label` with relocation (MSR_IR|MSR_DR) and RI turned on.
+ * On arrival at `label`: r13 = paca, r9 = saved CR, r11/r12 = SRR0/SRR1.
+ */
+#define EXCEPTION_PROLOG_PSERIES(area, label)                          \
+       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
+       std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
+       std     r10,area+EX_R10(r13);                                   \
+       std     r11,area+EX_R11(r13);                                   \
+       std     r12,area+EX_R12(r13);                                   \
+       mfspr   r9,SPRN_SPRG1;                                          \
+       std     r9,area+EX_R13(r13);                                    \
+       mfcr    r9;                                                     \
+       clrrdi  r12,r13,32;             /* get high part of &label */   \
+       mfmsr   r10;                                                    \
+       mfspr   r11,SPRN_SRR0;          /* save SRR0 */                 \
+       ori     r12,r12,(label)@l;      /* virt addr of handler */      \
+       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI;                           \
+       mtspr   SPRN_SRR0,r12;                                          \
+       mfspr   r12,SPRN_SRR1;          /* and SRR1 */                  \
+       mtspr   SPRN_SRR1,r10;                                          \
+       rfid;                                                           \
+       b       .       /* prevent speculative execution */
+
+/*
+ * This is the start of the interrupt handlers for iSeries
+ * This code runs with relocation on.
+ *
+ * Part 1 saves r9-r13 and CR into the paca save area (same layout as
+ * the pSeries prolog); part 2 fetches SRR0/SRR1 from the lppaca and
+ * turns RI back on.
+ */
+#define EXCEPTION_PROLOG_ISERIES_1(area)                               \
+       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
+       std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
+       std     r10,area+EX_R10(r13);                                   \
+       std     r11,area+EX_R11(r13);                                   \
+       std     r12,area+EX_R12(r13);                                   \
+       mfspr   r9,SPRN_SPRG1;                                          \
+       std     r9,area+EX_R13(r13);                                    \
+       mfcr    r9
+
+#define EXCEPTION_PROLOG_ISERIES_2                                     \
+       mfmsr   r10;                                                    \
+       ld      r11,PACALPPACA+LPPACASRR0(r13);                         \
+       ld      r12,PACALPPACA+LPPACASRR1(r13);                         \
+       ori     r10,r10,MSR_RI;                                         \
+       mtmsrd  r10,1
+
+/*
+ * The common exception prolog is used for all except a few exceptions
+ * such as a segment miss on a kernel address.  We have to be prepared
+ * to take another exception from the point where we first touch the
+ * kernel stack onwards.
+ *
+ * On entry r13 points to the paca, r9-r13 are saved in the paca
+ * save area 'area', r9 contains the saved CR, r11 and r12 contain the
+ * saved SRR0 and SRR1, and relocation is on.
+ *
+ * Builds a full pt_regs frame on the kernel stack: if we came from
+ * userspace (MSR_PR set) the kernel stack pointer is taken from
+ * paca->ksave, otherwise we extend the current kernel stack.  The
+ * resulting r1 must be a kernel address (top bit set, i.e. negative
+ * when compared signed) or we abort to bad_stack.  The trap value is
+ * stored as (n)+1: the low bit set indicates the non-volatile GPRs
+ * have not been saved yet (save_nvgprs clears it — TODO confirm
+ * against entry_64.S).
+ */
+#define EXCEPTION_PROLOG_COMMON(n, area)                                  \
+       andi.   r10,r12,MSR_PR;         /* See if coming from user      */ \
+       mr      r10,r1;                 /* Save r1                      */ \
+       subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack  */ \
+       beq-    1f;                                                        \
+       ld      r1,PACAKSAVE(r13);      /* kernel stack to use          */ \
+1:     cmpdi   cr1,r1,0;               /* check if r1 is in userspace  */ \
+       bge-    cr1,bad_stack;          /* abort if it is               */ \
+       std     r9,_CCR(r1);            /* save CR in stackframe        */ \
+       std     r11,_NIP(r1);           /* save SRR0 in stackframe      */ \
+       std     r12,_MSR(r1);           /* save SRR1 in stackframe      */ \
+       std     r10,0(r1);              /* make stack chain pointer     */ \
+       std     r0,GPR0(r1);            /* save r0 in stackframe        */ \
+       std     r10,GPR1(r1);           /* save r1 in stackframe        */ \
+       std     r2,GPR2(r1);            /* save r2 in stackframe        */ \
+       SAVE_4GPRS(3, r1);              /* save r3 - r6 in stackframe   */ \
+       SAVE_2GPRS(7, r1);              /* save r7, r8 in stackframe    */ \
+       ld      r9,area+EX_R9(r13);     /* move r9, r10 to stackframe   */ \
+       ld      r10,area+EX_R10(r13);                                      \
+       std     r9,GPR9(r1);                                               \
+       std     r10,GPR10(r1);                                             \
+       ld      r9,area+EX_R11(r13);    /* move r11 - r13 to stackframe */ \
+       ld      r10,area+EX_R12(r13);                                      \
+       ld      r11,area+EX_R13(r13);                                      \
+       std     r9,GPR11(r1);                                              \
+       std     r10,GPR12(r1);                                             \
+       std     r11,GPR13(r1);                                             \
+       ld      r2,PACATOC(r13);        /* get kernel TOC into r2       */ \
+       mflr    r9;                     /* save LR in stackframe        */ \
+       std     r9,_LINK(r1);                                              \
+       mfctr   r10;                    /* save CTR in stackframe       */ \
+       std     r10,_CTR(r1);                                              \
+       mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
+       std     r11,_XER(r1);                                              \
+       li      r9,(n)+1;                                                  \
+       std     r9,_TRAP(r1);           /* set trap number              */ \
+       li      r10,0;                                                     \
+       ld      r11,exception_marker@toc(r2);                              \
+       std     r10,RESULT(r1);         /* clear regs->result           */ \
+       std     r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame      */
+
+/*
+ * Exception vectors.
+ *
+ * STD_EXCEPTION_PSERIES emits a standard pSeries vector at absolute
+ * address n: save r13 in SPRG1, turn the runlatch on, then enter
+ * EXCEPTION_PROLOG_PSERIES (defined earlier, outside this hunk),
+ * which eventually reaches label##_common.
+ */
+#define STD_EXCEPTION_PSERIES(n, label)                        \
+       . = n;                                          \
+       .globl label##_pSeries;                         \
+label##_pSeries:                                       \
+       HMT_MEDIUM;                                     \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */  \
+       RUNLATCH_ON(r13);                               \
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+
+/*
+ * Standard iSeries handler: no absolute vector address (the hypervisor
+ * dispatches us), just run both iSeries prolog halves with the given
+ * paca save area and branch to the shared label##_common code.
+ */
+#define STD_EXCEPTION_ISERIES(n, label, area)          \
+       .globl label##_iSeries;                         \
+label##_iSeries:                                       \
+       HMT_MEDIUM;                                     \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */  \
+       RUNLATCH_ON(r13);                               \
+       EXCEPTION_PROLOG_ISERIES_1(area);               \
+       EXCEPTION_PROLOG_ISERIES_2;                     \
+       b       label##_common
+
+/*
+ * Like STD_EXCEPTION_ISERIES, but for interrupts subject to
+ * soft-disabling: if paca->proc_enabled is zero the interrupt is
+ * deferred by branching to label##_iSeries_masked instead of being
+ * handled now.
+ */
+#define MASKABLE_EXCEPTION_ISERIES(n, label)                           \
+       .globl label##_iSeries;                                         \
+label##_iSeries:                                                       \
+       HMT_MEDIUM;                                                     \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */                  \
+       RUNLATCH_ON(r13);                                               \
+       EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);                         \
+       lbz     r10,PACAPROCENABLED(r13);                               \
+       cmpwi   0,r10,0;                                                \
+       beq-    label##_iSeries_masked;                                 \
+       EXCEPTION_PROLOG_ISERIES_2;                                     \
+       b       label##_common;                                         \
+/*
+ * Interrupt enable/disable for exception common code.  With
+ * DO_SOFT_DISABLE (iSeries) "disabled" means soft-disabled: the old
+ * soft-enable state is saved in the frame's SOFTE slot and
+ * paca->proc_enabled is cleared, but MSR_EE is deliberately turned ON
+ * (hard-enabled) — ORing in MSR_EE below is intentional, not a bug —
+ * so the hypervisor can still deliver events; masked interrupts are
+ * replayed later.  Without soft-disable, DISABLE_INTS is a no-op
+ * (exception entry already cleared EE) and ENABLE_INTS restores EE
+ * from the interrupted context's saved MSR.
+ */
+#ifdef DO_SOFT_DISABLE
+#define DISABLE_INTS                           \
+       lbz     r10,PACAPROCENABLED(r13);       \
+       li      r11,0;                          \
+       std     r10,SOFTE(r1);                  \
+       mfmsr   r10;                            \
+       stb     r11,PACAPROCENABLED(r13);       \
+       ori     r10,r10,MSR_EE;                 \
+       mtmsrd  r10,1
+
+#define ENABLE_INTS                            \
+       lbz     r10,PACAPROCENABLED(r13);       \
+       mfmsr   r11;                            \
+       std     r10,SOFTE(r1);                  \
+       ori     r11,r11,MSR_EE;                 \
+       mtmsrd  r11,1
+
+#else  /* hard enable/disable interrupts */
+#define DISABLE_INTS
+
+#define ENABLE_INTS                            \
+       ld      r12,_MSR(r1);                   \
+       mfmsr   r11;                            \
+       rlwimi  r11,r12,0,MSR_EE;               \
+       mtmsrd  r11,1
+
+#endif
+
+/*
+ * Standard second-level handler: build the pt_regs frame, disable
+ * interrupts, save the non-volatile GPRs, then call the C handler
+ * hdlr with a pointer to the pt_regs and return via ret_from_except.
+ */
+#define STD_EXCEPTION_COMMON(trap, label, hdlr)                \
+       .align  7;                                      \
+       .globl label##_common;                          \
+label##_common:                                                \
+       EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
+       DISABLE_INTS;                                   \
+       bl      .save_nvgprs;                           \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;             \
+       bl      hdlr;                                   \
+       b       .ret_from_except
+
+/*
+ * Like STD_EXCEPTION_COMMON but skips saving the non-volatile GPRs
+ * and returns via the lighter ret_from_except_lite — for handlers
+ * (e.g. the decrementer) that never look at the NVGPRs.
+ */
+#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)   \
+       .align  7;                                      \
+       .globl label##_common;                          \
+label##_common:                                                \
+       EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
+       DISABLE_INTS;                                   \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;             \
+       bl      hdlr;                                   \
+       b       .ret_from_except_lite
+
+/*
+ * Start of pSeries system interrupt routines.
+ * The vectors live at their architected absolute addresses, starting
+ * with system reset at 0x100.
+ */
+       . = 0x100
+       .globl __start_interrupts
+__start_interrupts:
+
+       STD_EXCEPTION_PSERIES(0x100, system_reset)
+
+       /* 0x200 machine check: open-coded (not .globl) so the FWNMI
+        * variant below can share machine_check_common. */
+       . = 0x200
+_machine_check_pSeries:
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+       /* 0x300 data storage interrupt.  On CPUs without an SLB,
+        * check DSISR/DAR first: (DAR >> 60)==0xc with DSISR bit set
+        * means a kernel segment-table miss, handled by the bolted
+        * stab code instead of the normal fault path. */
+       . = 0x300
+       .globl data_access_pSeries
+data_access_pSeries:
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13
+BEGIN_FTR_SECTION
+       mtspr   SPRN_SPRG2,r12
+       mfspr   r13,SPRN_DAR
+       mfspr   r12,SPRN_DSISR
+       srdi    r13,r13,60
+       rlwimi  r13,r12,16,0x20
+       mfcr    r12
+       cmpwi   r13,0x2c
+       beq     .do_stab_bolted_pSeries
+       mtcrf   0x80,r12
+       mfspr   r12,SPRN_SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
+
+       /* 0x380 data SLB miss: save r9-r13 and r3 in the paca EXSLB
+        * area by hand (the generic prolog would clobber r3, which
+        * carries the faulting address), then take the relative branch
+        * to do_slb_miss, which works with relocation off. */
+       . = 0x380
+       .globl data_access_slb_pSeries
+data_access_slb_pSeries:
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13
+       RUNLATCH_ON(r13)
+       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
+       std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
+       std     r10,PACA_EXSLB+EX_R10(r13)
+       std     r11,PACA_EXSLB+EX_R11(r13)
+       std     r12,PACA_EXSLB+EX_R12(r13)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       mfspr   r9,SPRN_SPRG1
+       std     r9,PACA_EXSLB+EX_R13(r13)
+       mfcr    r9
+       mfspr   r12,SPRN_SRR1           /* and SRR1 */
+       mfspr   r3,SPRN_DAR
+       b       .do_slb_miss            /* Rel. branch works in real mode */
+
+       STD_EXCEPTION_PSERIES(0x400, instruction_access)
+
+       /* 0x480 instruction SLB miss: identical to the data SLB miss
+        * above except the faulting address comes from SRR0. */
+       . = 0x480
+       .globl instruction_access_slb_pSeries
+instruction_access_slb_pSeries:
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13
+       RUNLATCH_ON(r13)
+       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
+       std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
+       std     r10,PACA_EXSLB+EX_R10(r13)
+       std     r11,PACA_EXSLB+EX_R11(r13)
+       std     r12,PACA_EXSLB+EX_R12(r13)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       mfspr   r9,SPRN_SPRG1
+       std     r9,PACA_EXSLB+EX_R13(r13)
+       mfcr    r9
+       mfspr   r12,SPRN_SRR1           /* and SRR1 */
+       mfspr   r3,SPRN_SRR0                    /* SRR0 is faulting address */
+       b       .do_slb_miss            /* Rel. branch works in real mode */
+
+       STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+       STD_EXCEPTION_PSERIES(0x600, alignment)
+       STD_EXCEPTION_PSERIES(0x700, program_check)
+       STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+       STD_EXCEPTION_PSERIES(0x900, decrementer)
+       STD_EXCEPTION_PSERIES(0xa00, trap_0a)
+       STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+
+       /* 0xc00 system call: no frame needed yet; stash r13 in r9,
+        * build the virtual address of system_call_common from the
+        * top 32 bits of the paca pointer, turn relocation (IR/DR)
+        * and RI back on, and rfid into the common syscall path. */
+       . = 0xc00
+       .globl  system_call_pSeries
+system_call_pSeries:
+       HMT_MEDIUM
+       RUNLATCH_ON(r9)
+       mr      r9,r13
+       mfmsr   r10
+       mfspr   r13,SPRN_SPRG3
+       mfspr   r11,SPRN_SRR0
+       clrrdi  r12,r13,32
+       oris    r12,r12,system_call_common@h
+       ori     r12,r12,system_call_common@l
+       mtspr   SPRN_SRR0,r12
+       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
+       mfspr   r12,SPRN_SRR1
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .       /* prevent speculative execution */
+
+       STD_EXCEPTION_PSERIES(0xd00, single_step)
+       STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+
+       /* We need to deal with the Altivec unavailable exception
+        * here which is at 0xf20, thus in the middle of the
+        * prolog code of the PerformanceMonitor one. A little
+        * trickery is thus necessary
+        */
+       . = 0xf00
+       b       performance_monitor_pSeries
+
+       STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
+
+       STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+       STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+
+       . = 0x3000
+
+/*** pSeries interrupt support ***/
+
+       /* moved from 0xf00; '.' places it at the current location,
+        * reached via the branch at 0xf00 above */
+       STD_EXCEPTION_PSERIES(., performance_monitor)
+
+       /* Continuation of the 0x300 stab-miss path: restore CR/r12
+        * saved there, then enter the prolog aimed at do_stab_bolted. */
+       .align  7
+_GLOBAL(do_stab_bolted_pSeries)
+       mtcrf   0x80,r12
+       mfspr   r12,SPRN_SPRG2
+       EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ * Firmware registers these entry points (RTAS ibm,nmi-register) for
+ * delivering system reset and machine check NMIs — presumably; verify
+ * against the platform FWNMI setup code.
+ */
+       .globl system_reset_fwnmi
+system_reset_fwnmi:
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+       .globl machine_check_fwnmi
+machine_check_fwnmi:
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#ifdef CONFIG_PPC_ISERIES
+/***  ISeries-LPAR interrupt handlers ***/
+
+       STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
+
+       /* Data access: same segment-table-miss screening as the
+        * pSeries 0x300 vector, then the iSeries two-part prolog. */
+       .globl data_access_iSeries
+data_access_iSeries:
+       mtspr   SPRN_SPRG1,r13
+BEGIN_FTR_SECTION
+       mtspr   SPRN_SPRG2,r12
+       mfspr   r13,SPRN_DAR
+       mfspr   r12,SPRN_DSISR
+       srdi    r13,r13,60
+       rlwimi  r13,r12,16,0x20
+       mfcr    r12
+       cmpwi   r13,0x2c
+       beq     .do_stab_bolted_iSeries
+       mtcrf   0x80,r12
+       mfspr   r12,SPRN_SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+       EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
+       EXCEPTION_PROLOG_ISERIES_2
+       b       data_access_common
+
+.do_stab_bolted_iSeries:
+       mtcrf   0x80,r12
+       mfspr   r12,SPRN_SPRG2
+       EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+       EXCEPTION_PROLOG_ISERIES_2
+       b       .do_stab_bolted
+
+       /* SLB misses: save r3 in the EXSLB area, pick up SRR1 (and the
+        * faulting address — DAR for data, SRR0 for instructions) from
+        * the lppaca, and go to the shared SLB miss handler. */
+       .globl  data_access_slb_iSeries
+data_access_slb_iSeries:
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       ld      r12,PACALPPACA+LPPACASRR1(r13)
+       mfspr   r3,SPRN_DAR
+       b       .do_slb_miss
+
+       STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
+
+       .globl  instruction_access_slb_iSeries
+instruction_access_slb_iSeries:
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       ld      r12,PACALPPACA+LPPACASRR1(r13)
+       ld      r3,PACALPPACA+LPPACASRR0(r13)
+       b       .do_slb_miss
+
+       MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
+       STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
+       STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
+       STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
+       MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
+       STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
+       STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
+
+       /* System call: mirror the pSeries entry's register layout
+        * (r9 = old r13, r13 = paca), fetch SRR0/SRR1 images via
+        * prolog part 2, then join the common syscall path. */
+       .globl  system_call_iSeries
+system_call_iSeries:
+       mr      r9,r13
+       mfspr   r13,SPRN_SPRG3
+       EXCEPTION_PROLOG_ISERIES_2
+       b       system_call_common
+
+       STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
+       STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
+       STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
+
+       /* System reset on iSeries doubles as the secondary-CPU entry
+        * point: processor 0 goes off to start initialization, other
+        * processors turn off their run light and spin in the loop
+        * below until told to start (or yield forever on non-SMP). */
+       .globl system_reset_iSeries
+system_reset_iSeries:
+       mfspr   r13,SPRN_SPRG3          /* Get paca address */
+       mfmsr   r24
+       ori     r24,r24,MSR_RI
+       mtmsrd  r24                     /* RI on */
+       lhz     r24,PACAPACAINDEX(r13)  /* Get processor # */
+       cmpwi   0,r24,0                 /* Are we processor 0? */
+       beq     .__start_initialization_iSeries /* Start up the first processor */
+       mfspr   r4,SPRN_CTRLF
+       li      r5,CTRL_RUNLATCH        /* Turn off the run light */
+       andc    r4,r4,r5
+       mtspr   SPRN_CTRLT,r4
+
+1:
+       HMT_LOW
+#ifdef CONFIG_SMP
+       lbz     r23,PACAPROCSTART(r13)  /* Test if this processor
+                                        * should start */
+       sync
+       LOADADDR(r3,current_set)
+       sldi    r28,r24,3               /* get current_set[cpu#] */
+       ldx     r3,r3,r28
+       addi    r1,r3,THREAD_SIZE
+       subi    r1,r1,STACK_FRAME_OVERHEAD
+
+       cmpwi   0,r23,0
+       beq     iSeries_secondary_smp_loop      /* Loop until told to go */
+       bne     .__secondary_start              /* We've been told to go */
+iSeries_secondary_smp_loop:
+       /* Let the Hypervisor know we are alive */
+       /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+       lis     r3,0x8002
+       rldicr  r3,r3,32,15             /* r3 = (r3 << 32) & 0xffff000000000000 */
+#else /* CONFIG_SMP */
+       /* Yield the processor.  This is required for non-SMP kernels
+               which are running on multi-threaded machines. */
+       lis     r3,0x8000
+       rldicr  r3,r3,32,15             /* r3 = (r3 << 32) & 0xffff000000000000 */
+       addi    r3,r3,18                /* r3 = 0x8000000000000012 which is "yield" */
+       li      r4,0                    /* "yield timed" */
+       li      r5,-1                   /* "yield forever" */
+#endif /* CONFIG_SMP */
+       li      r0,-1                   /* r0=-1 indicates a Hypervisor call */
+       sc                              /* Invoke the hypervisor via a system call */
+       mfspr   r13,SPRN_SPRG3          /* Put r13 back ???? */
+       b       1b                      /* If SMP not configured, secondaries
+                                        * loop forever */
+
+       /* Soft-disabled decrementer: note the missed tick in the
+        * lppaca, rearm DEC with the default value, then fall through
+        * to the generic masked-interrupt exit below. */
+       .globl decrementer_iSeries_masked
+decrementer_iSeries_masked:
+       li      r11,1
+       stb     r11,PACALPPACA+LPPACADECRINT(r13)
+       lwz     r12,PACADEFAULTDECR(r13)
+       mtspr   SPRN_DEC,r12
+       /* fall through */
+
+       /* Masked-interrupt exit: restore CR and r9-r13 from the EXGEN
+        * save area, reload SRR0/SRR1 from the lppaca, and return to
+        * the interrupted code as if nothing happened. */
+       .globl hardware_interrupt_iSeries_masked
+hardware_interrupt_iSeries_masked:
+       mtcrf   0x80,r9         /* Restore regs */
+       ld      r11,PACALPPACA+LPPACASRR0(r13)
+       ld      r12,PACALPPACA+LPPACASRR1(r13)
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
+       ld      r9,PACA_EXGEN+EX_R9(r13)
+       ld      r10,PACA_EXGEN+EX_R10(r13)
+       ld      r11,PACA_EXGEN+EX_R11(r13)
+       ld      r12,PACA_EXGEN+EX_R12(r13)
+       ld      r13,PACA_EXGEN+EX_R13(r13)
+       rfid
+       b       .       /* prevent speculative execution */
+#endif /* CONFIG_PPC_ISERIES */
+
+/*** Common interrupt handlers ***/
+
+       STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+
+       /*
+        * Machine check is different because we use a different
+        * save area: PACA_EXMC instead of PACA_EXGEN.
+        * Otherwise this is the STD_EXCEPTION_COMMON body open-coded.
+        */
+       .align  7
+       .globl machine_check_common
+machine_check_common:
+       EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+       DISABLE_INTS
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .machine_check_exception
+       b       .ret_from_except
+
+       STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+       STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+       STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+       STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+       STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+       STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
+       STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+#ifdef CONFIG_ALTIVEC
+       STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+#else
+       STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+#endif
+
+/*
+ * Here we have detected that the kernel stack pointer is bad.
+ * R9 contains the saved CR, r13 points to the paca,
+ * r10 contains the (bad) kernel stack pointer,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
+ */
+bad_stack:
+       ld      r1,PACAEMERGSP(r13)
+       subi    r1,r1,64+INT_FRAME_SIZE
+       std     r9,_CCR(r1)
+       std     r10,GPR1(r1)
+       std     r11,_NIP(r1)
+       std     r12,_MSR(r1)
+       mfspr   r11,SPRN_DAR
+       mfspr   r12,SPRN_DSISR
+       std     r11,_DAR(r1)
+       std     r12,_DSISR(r1)
+       mflr    r10
+       mfctr   r11
+       mfxer   r12
+       std     r10,_LINK(r1)
+       std     r11,_CTR(r1)
+       std     r12,_XER(r1)
+       SAVE_GPR(0,r1)
+       SAVE_GPR(2,r1)
+       SAVE_4GPRS(3,r1)
+       SAVE_2GPRS(7,r1)
+       SAVE_10GPRS(12,r1)
+       SAVE_10GPRS(22,r1)
+       /* terminate the back chain so a stack trace stops here */
+       addi    r11,r1,INT_FRAME_SIZE
+       std     r11,0(r1)
+       li      r12,0
+       std     r12,0(r11)
+       ld      r2,PACATOC(r13)
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .kernel_bad_stack
+       b       1b
+
+/*
+ * Return from an exception with minimal checks.
+ * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
+ * If interrupts have been enabled, or anything has been
+ * done that might have changed the scheduling status of
+ * any task or sent any task a signal, you should use
+ * ret_from_except or ret_from_except_lite instead of this.
+ */
+       .globl  fast_exception_return
+fast_exception_return:
+       ld      r12,_MSR(r1)
+       ld      r11,_NIP(r1)
+       andi.   r3,r12,MSR_RI           /* check if RI is set */
+       beq-    unrecov_fer
+       ld      r3,_CCR(r1)
+       ld      r4,_LINK(r1)
+       ld      r5,_CTR(r1)
+       ld      r6,_XER(r1)
+       mtcr    r3
+       mtlr    r4
+       mtctr   r5
+       mtxer   r6
+       REST_GPR(0, r1)
+       REST_8GPRS(2, r1)
+
+       /* clear RI before loading SRR0/SRR1: from here until rfid an
+        * exception would clobber them, so mark state unrecoverable */
+       mfmsr   r10
+       clrrdi  r10,r10,2               /* clear RI (LE is 0 already) */
+       mtmsrd  r10,1
+
+       mtspr   SPRN_SRR1,r12
+       mtspr   SPRN_SRR0,r11
+       REST_4GPRS(10, r1)
+       ld      r1,GPR1(r1)
+       rfid
+       b       .       /* prevent speculative execution */
+
+/* RI was clear on entry: the interrupted state is unrecoverable */
+unrecov_fer:
+       bl      .save_nvgprs
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .unrecoverable_exception
+       b       1b
+
+/*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+       .align  7
+       .globl data_access_common
+data_access_common:
+       RUNLATCH_ON(r10)                /* It won't fit in the 0x300 handler */
+       /* stash DAR/DSISR in the paca before the prolog reuses r10 */
+       mfspr   r10,SPRN_DAR
+       std     r10,PACA_EXGEN+EX_DAR(r13)
+       mfspr   r10,SPRN_DSISR
+       stw     r10,PACA_EXGEN+EX_DSISR(r13)
+       EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+       ld      r3,PACA_EXGEN+EX_DAR(r13)
+       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
+       li      r5,0x300
+       b       .do_hash_page           /* Try to handle as hpte fault */
+
+       .align  7
+       .globl instruction_access_common
+instruction_access_common:
+       EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+       ld      r3,_NIP(r1)
+       andis.  r4,r12,0x5820           /* fault bits from SRR1 -> r4 */
+       li      r5,0x400
+       b       .do_hash_page           /* Try to handle as hpte fault */
+
+       /* External interrupt: build frame, then dispatch to do_IRQ.
+        * hardware_interrupt_entry is a separate entry so other code
+        * can join after the prolog. */
+       .align  7
+       .globl hardware_interrupt_common
+       .globl hardware_interrupt_entry
+hardware_interrupt_common:
+       EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+hardware_interrupt_entry:
+       DISABLE_INTS
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_IRQ
+       b       .ret_from_except_lite
+
+       /* Alignment interrupt: preserve DAR/DSISR across the prolog
+        * and record them in the pt_regs for the C handler. */
+       .align  7
+       .globl alignment_common
+alignment_common:
+       mfspr   r10,SPRN_DAR
+       std     r10,PACA_EXGEN+EX_DAR(r13)
+       mfspr   r10,SPRN_DSISR
+       stw     r10,PACA_EXGEN+EX_DSISR(r13)
+       EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+       ld      r3,PACA_EXGEN+EX_DAR(r13)
+       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
+       std     r3,_DAR(r1)
+       std     r4,_DSISR(r1)
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ENABLE_INTS
+       bl      .alignment_exception
+       b       .ret_from_except
+
+       .align  7
+       .globl program_check_common
+program_check_common:
+       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ENABLE_INTS
+       bl      .program_check_exception
+       b       .ret_from_except
+
+       /* FP unavailable: CR0 still holds the MSR_PR test from the
+        * prolog — from user, just lazily load the FP state; from
+        * kernel it's a bug, so report and trap. */
+       .align  7
+       .globl fp_unavailable_common
+fp_unavailable_common:
+       EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+       bne     .load_up_fpu            /* if from user, just load it up */
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ENABLE_INTS
+       bl      .kernel_fp_unavailable_exception
+       BUG_OPCODE
+
+       .align  7
+       .globl altivec_unavailable_common
+altivec_unavailable_common:
+       EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+       bne     .load_up_altivec        /* if from user, just load it up */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ENABLE_INTS
+       bl      .altivec_unavailable_exception
+       b       .ret_from_except
+
+#ifdef CONFIG_ALTIVEC
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+       mfmsr   r5                      /* grab the current MSR */
+       oris    r5,r5,MSR_VEC@h
+       mtmsrd  r5                      /* enable use of VMX now */
+       isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altivec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+       ld      r3,last_task_used_altivec@got(r2)
+       ld      r4,0(r3)
+       cmpdi   0,r4,0
+       beq     1f
+       /* Save VMX state to last_task_used_altivec's THREAD struct */
+       addi    r4,r4,THREAD
+       SAVE_32VRS(0,r5,r4)
+       mfvscr  vr0
+       li      r10,THREAD_VSCR
+       stvx    vr0,r10,r4
+       /* Disable VMX for last_task_used_altivec */
+       ld      r5,PT_REGS(r4)
+       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r6,MSR_VEC@h
+       andc    r4,r4,r6
+       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+       /* Hack: if we get an altivec unavailable trap with VRSAVE
+        * set to all zeros, we assume this is a broken application
+        * that fails to set it properly, and thus we switch it to
+        * all 1's
+        */
+       mfspr   r4,SPRN_VRSAVE
+       cmpdi   0,r4,0
+       bne+    1f
+       li      r4,-1
+       mtspr   SPRN_VRSAVE,r4
+1:
+       /* enable use of VMX after return */
+       ld      r4,PACACURRENT(r13)
+       addi    r5,r4,THREAD            /* Get THREAD */
+       oris    r12,r12,MSR_VEC@h
+       std     r12,_MSR(r1)
+       li      r4,1
+       li      r10,THREAD_VSCR
+       stw     r4,THREAD_USED_VR(r5)
+       lvx     vr0,r10,r5
+       mtvscr  vr0
+       REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+       /* Update last_task_used_altivec to 'current' */
+       subi    r4,r5,THREAD            /* Back to 'current' */
+       std     r4,0(r3)
+#endif /* CONFIG_SMP */
+       /* restore registers and return */
+       b       fast_exception_return
+#endif /* CONFIG_ALTIVEC */
+
+/*
+ * Hash table stuff.
+ * On entry: r3 = faulting address, r4 = access/error bits (DSISR or
+ * SRR1-derived), r5 = trap number (0x300 or 0x400).  Try to resolve
+ * the fault by inserting a hashed page table entry; fall back to the
+ * full page-fault path if that is not possible.
+ */
+       .align  7
+_GLOBAL(do_hash_page)
+       std     r3,_DAR(r1)
+       std     r4,_DSISR(r1)
+
+       andis.  r0,r4,0xa450            /* weird error? */
+       bne-    .handle_page_fault      /* if not, try to insert a HPTE */
+BEGIN_FTR_SECTION
+       andis.  r0,r4,0x0020            /* Is it a segment table fault? */
+       bne-    .do_ste_alloc           /* If so handle it */
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+
+       /*
+        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+        * accessing a userspace segment (even from the kernel). We assume
+        * kernel addresses always have the high bit set.
+        */
+       rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
+       rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
+       orc     r0,r12,r0               /* MSR_PR | ~high_bit */
+       rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
+       ori     r4,r4,1                 /* add _PAGE_PRESENT */
+       rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
+
+       /*
+        * On iSeries, we soft-disable interrupts here, then
+        * hard-enable interrupts so that the hash_page code can spin on
+        * the hash_table_lock without problems on a shared processor.
+        */
+       DISABLE_INTS
+
+       /*
+        * r3 contains the faulting address
+        * r4 contains the required access permissions
+        * r5 contains the trap number
+        *
+        * at return r3 = 0 for success
+        */
+       bl      .hash_page              /* build HPTE if possible */
+       cmpdi   r3,0                    /* see if hash_page succeeded */
+
+#ifdef DO_SOFT_DISABLE
+       /*
+        * If we had interrupts soft-enabled at the point where the
+        * DSI/ISI occurred, and an interrupt came in during hash_page,
+        * handle it now.
+        * We jump to ret_from_except_lite rather than fast_exception_return
+        * because ret_from_except_lite will check for and handle pending
+        * interrupts if necessary.
+        */
+       beq     .ret_from_except_lite
+       /* For a hash failure, we don't bother re-enabling interrupts */
+       ble-    12f
+
+       /*
+        * hash_page couldn't handle it, set soft interrupt enable back
+        * to what it was before the trap.  Note that .local_irq_restore
+        * handles any interrupts pending at this point.
+        */
+       ld      r3,SOFTE(r1)
+       bl      .local_irq_restore
+       b       11f
+#else
+       beq     fast_exception_return   /* Return from exception on success */
+       ble-    12f                     /* Failure return from hash_page */
+
+       /* fall through */
+#endif
+
+/* Here we have a page fault that hash_page can't handle. */
+_GLOBAL(handle_page_fault)
+       ENABLE_INTS
+11:    ld      r4,_DAR(r1)
+       ld      r5,_DSISR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_page_fault
+       cmpdi   r3,0
+       beq+    .ret_from_except_lite
+       /* do_page_fault failed: report a bad page fault (r5 = its
+        * return code) and take the full exception exit */
+       bl      .save_nvgprs
+       mr      r5,r3
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lwz     r4,_DAR(r1)
+       bl      .bad_page_fault
+       b       .ret_from_except
+
+/* We have a page fault that hash_page could handle but HV refused
+ * the PTE insertion
+ */
+12:    bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lwz     r4,_DAR(r1)
+       bl      .low_hash_fault
+       b       .ret_from_except
+
+       /* here we have a segment miss */
+_GLOBAL(do_ste_alloc)
+       bl      .ste_allocate           /* try to insert stab entry */
+       cmpdi   r3,0
+       beq+    fast_exception_return
+       b       .handle_page_fault
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r9 - r13 are saved in paca->exslb.
+ * We assume we aren't going to take any exceptions during this procedure.
+ * We assume (DAR >> 60) == 0xc.
+ *
+ * Inserts a bolted segment table entry for the faulting kernel
+ * address, evicting a pseudo-random entry from the primary group if
+ * the group is full, then returns straight to the interrupted code.
+ */
+       .align  7
+_GLOBAL(do_stab_bolted)
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
+
+       /* Hash to the primary group */
+       ld      r10,PACASTABVIRT(r13)
+       mfspr   r11,SPRN_DAR
+       srdi    r11,r11,28
+       rldimi  r10,r11,7,52    /* r10 = first ste of the group */
+
+       /* Calculate VSID */
+       /* This is a kernel address, so protovsid = ESID */
+       ASM_VSID_SCRAMBLE(r11, r9)
+       rldic   r9,r11,12,16    /* r9 = vsid << 12 */
+
+       /* Search the primary group for a free entry */
+1:     ld      r11,0(r10)      /* Test valid bit of the current ste    */
+       andi.   r11,r11,0x80
+       beq     2f
+       addi    r10,r10,16
+       andi.   r11,r10,0x70
+       bne     1b
+
+       /* Stick for only searching the primary group for now.          */
+       /* At least for now, we use a very simple random castout scheme */
+       /* Use the TB as a random number ;  OR in 1 to avoid entry 0    */
+       mftb    r11
+       rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
+       ori     r11,r11,0x10
+
+       /* r10 currently points to an ste one past the group of interest */
+       /* make it point to the randomly selected entry                 */
+       subi    r10,r10,128
+       or      r10,r10,r11     /* r10 is the entry to invalidate       */
+
+       isync                   /* mark the entry invalid               */
+       ld      r11,0(r10)
+       rldicl  r11,r11,56,1    /* clear the valid bit */
+       rotldi  r11,r11,8
+       std     r11,0(r10)
+       sync
+
+       clrrdi  r11,r11,28      /* Get the esid part of the ste         */
+       slbie   r11
+
+2:     std     r9,8(r10)       /* Store the vsid part of the ste       */
+       eieio
+
+       mfspr   r11,SPRN_DAR            /* Get the new esid                     */
+       clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
+       ori     r11,r11,0x90    /* Turn on valid and kp                 */
+       std     r11,0(r10)      /* Put new entry back into the stab     */
+
+       sync
+
+       /* All done -- return from exception. */
+       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
+       ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */
+
+       andi.   r10,r12,MSR_RI
+       beq-    unrecov_slb
+
+       mtcrf   0x80,r9                 /* restore CR */
+
+       /* clear RI before touching SRR0/SRR1, as in fast_exception_return */
+       mfmsr   r10
+       clrrdi  r10,r10,2
+       mtmsrd  r10,1
+
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
+       ld      r9,PACA_EXSLB+EX_R9(r13)
+       ld      r10,PACA_EXSLB+EX_R10(r13)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)
+       rfid
+       b       .       /* prevent speculative execution */
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(do_slb_miss)
+       mflr    r10
+
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
+
+       bl      .slb_allocate                   /* handle it */
+
+       /* All done -- return from exception. */
+
+       ld      r10,PACA_EXSLB+EX_LR(r13)
+       ld      r3,PACA_EXSLB+EX_R3(r13)
+       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+       /* on iSeries the hypervisor delivers SRR0/1 via the lppaca */
+       ld      r11,PACALPPACA+LPPACASRR0(r13)  /* get SRR0 value */
+#endif /* CONFIG_PPC_ISERIES */
+
+       mtlr    r10
+
+       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+       beq-    unrecov_slb
+
+/* single-field mtcrf uses an encoding the assembler only accepts as power4 */
+.machine       push
+.machine       "power4"
+       mtcrf   0x80,r9
+       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
+.machine       pop
+
+#ifdef CONFIG_PPC_ISERIES
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
+#endif /* CONFIG_PPC_ISERIES */
+       ld      r9,PACA_EXSLB+EX_R9(r13)
+       ld      r10,PACA_EXSLB+EX_R10(r13)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)      /* restore r13 (paca ptr) last */
+       rfid
+       b       .       /* prevent speculative execution */
+
+/*
+ * Unrecoverable SLB/stab miss: the interrupted context had MSR_RI clear,
+ * so we cannot safely rfid back.  Report the failure and loop forever.
+ */
+unrecov_slb:
+       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+       DISABLE_INTS
+       bl      .save_nvgprs
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .unrecoverable_exception
+       b       1b              /* should not return; spin if it does */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap in lpardata.c), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+       . = STAB0_PHYS_ADDR     /* 0x6000 */
+       .globl initial_stab
+initial_stab:
+       .space  4096
+
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+       .= 0x7000
+       .globl fwnmi_data_area
+fwnmi_data_area:
+
+       /* iSeries does not use the FWNMI stuff, so it is safe to put
+        * this here, even if we later allow kernels that will boot on
+        * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+        . = LPARMAP_PHYS
+#include "lparmap.s"
+/*
+ * This ".text" is here for old compilers that generate a trailing
+ * .note section when compiling .c files to .s
+ */
+       .text
+#endif /* CONFIG_PPC_ISERIES */
+
+       /* code below this point need not live at a fixed low address */
+        . = 0x8000
+
+/*
+ * On pSeries, secondary processors spin in the following code.
+ * At entry, r3 = this processor's number (physical cpu id)
+ */
+_GLOBAL(pSeries_secondary_smp_init)
+       mr      r24,r3
+       
+       /* turn on 64-bit mode */
+       bl      .enable_64b_mode
+       isync
+
+       /* Copy some CPU settings from CPU 0 */
+       bl      .__restore_cpu_setup
+
+       /* Set up a paca value for this processor. Since we have the
+        * physical cpu id in r24, we need to search the pacas to find
+        * which logical id maps to our physical one.
+        */
+       LOADADDR(r13, paca)             /* Get base vaddr of paca array  */
+       li      r5,0                    /* logical cpu id                */
+1:     lhz     r6,PACAHWCPUID(r13)     /* Load HW procid from paca      */
+       cmpw    r6,r24                  /* Compare to our id             */
+       beq     2f
+       addi    r13,r13,PACA_SIZE       /* Loop to next PACA on miss     */
+       addi    r5,r5,1
+       cmpwi   r5,NR_CPUS
+       blt     1b
+
+       /* no paca matches our hardware cpu id: give up this cpu */
+       mr      r3,r24                  /* not found, copy phys to r3    */
+       b       .kexec_wait             /* next kernel might do better   */
+
+2:     mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3   */
+       /* From now on, r24 is expected to be logical cpuid */
+       mr      r24,r5
+3:     HMT_LOW
+       lbz     r23,PACAPROCSTART(r13)  /* Test if this processor should */
+                                       /* start.                        */
+       sync
+
+       /* Create a temp kernel stack for use before relocation is on.  */
+       ld      r1,PACAEMERGSP(r13)
+       subi    r1,r1,STACK_FRAME_OVERHEAD
+
+       cmpwi   0,r23,0
+#ifdef CONFIG_SMP
+       bne     .__secondary_start
+#endif
+       b       3b                      /* Loop until told to go         */
+
+#ifdef CONFIG_PPC_ISERIES
+_STATIC(__start_initialization_iSeries)
+       /* Clear out the BSS */
+       LOADADDR(r11,__bss_stop)
+       LOADADDR(r8,__bss_start)
+       sub     r11,r11,r8              /* bss size                     */
+       addi    r11,r11,7               /* round up to an even double word */
+       rldicl. r11,r11,61,3            /* shift right by 3             */
+       beq     4f                      /* empty bss: nothing to clear  */
+       addi    r8,r8,-8
+       li      r0,0
+       mtctr   r11                     /* zero this many doublewords   */
+3:     stdu    r0,8(r8)
+       bdnz    3b
+4:
+       /* initial kernel stack in init_thread_union, back chain = 0 */
+       LOADADDR(r1,init_thread_union)
+       addi    r1,r1,THREAD_SIZE
+       li      r0,0
+       stdu    r0,-STACK_FRAME_OVERHEAD(r1)
+
+       LOADADDR(r3,cpu_specs)
+       LOADADDR(r4,cur_cpu_spec)
+       li      r5,0
+       bl      .identify_cpu
+
+       /* TOC = __toc_start + 0x8000 (two sign-safe 0x4000 increments) */
+       LOADADDR(r2,__toc_start)
+       addi    r2,r2,0x4000
+       addi    r2,r2,0x4000
+
+       bl      .iSeries_early_setup
+       bl      .early_setup
+
+       /* relocation is on at this point */
+
+       b       .start_here_common
+#endif /* CONFIG_PPC_ISERIES */
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+
+/*
+ * Turn off the MMU (clear MSR_IR and MSR_DR) and continue at the
+ * real-mode address supplied in r4.  Returns directly via blr if
+ * translation is already off.  Clobbers r0 and r3.
+ */
+_STATIC(__mmu_off)
+       mfmsr   r3
+       andi.   r0,r3,MSR_IR|MSR_DR
+       beqlr                   /* MMU already off: nothing to do */
+       andc    r3,r3,r0        /* clear IR/DR in the saved MSR */
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       sync
+       rfid
+       b       .       /* prevent speculative execution */
+
+
+/*
+ * Here is our main kernel entry point. We currently support 2 kinds of entries
+ * depending on the value of r5.
+ *
+ *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
+ *                 in r3...r7
+ *   
+ *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
+ *                 DT block, r4 is a physical pointer to the kernel itself
+ *
+ */
+_GLOBAL(__start_initialization_multiplatform)
+       /*
+        * Are we booted from a PROM Of-type client-interface ?
+        */
+       cmpldi  cr0,r5,0
+       bne     .__boot_from_prom               /* yes -> prom */
+
+       /* Save parameters (kexec entry: r3 = DT block, r4 = kernel phys) */
+       mr      r31,r3
+       mr      r30,r4
+
+       /* Make sure we are running in 64 bits mode */
+       bl      .enable_64b_mode
+
+       /* Setup some critical 970 SPRs before switching MMU off */
+       bl      .__970_cpu_preinit
+
+       /* cpu # */
+       li      r24,0
+
+       /* Switch off MMU if not already */
+       LOADADDR(r4, .__after_prom_start - KERNELBASE)
+       add     r4,r4,r30               /* real addr to resume at */
+       bl      .__mmu_off
+       b       .__after_prom_start
+
+_STATIC(__boot_from_prom)
+       /* Save parameters (OF "legacy" entry: r3..r7) in nonvolatiles */
+       mr      r31,r3
+       mr      r30,r4
+       mr      r29,r5
+       mr      r28,r6
+       mr      r27,r7
+
+       /* Make sure we are running in 64 bits mode */
+       bl      .enable_64b_mode
+
+       /* put a relocation offset into r3 */
+       bl      .reloc_offset
+
+       /* TOC = __toc_start + 0x8000 (two sign-safe 0x4000 increments) */
+       LOADADDR(r2,__toc_start)
+       addi    r2,r2,0x4000
+       addi    r2,r2,0x4000
+
+       /* Relocate the TOC from a virt addr to a real addr */
+       add     r2,r2,r3
+
+       /* Restore parameters */
+       mr      r3,r31
+       mr      r4,r30
+       mr      r5,r29
+       mr      r6,r28
+       mr      r7,r27
+
+       /* Do all of the interaction with OF client interface */
+       bl      .prom_init
+       /* We never return */
+       trap
+
+/*
+ * At this point, r3 contains the physical address we are running at,
+ * returned by prom_init()
+ */
+_STATIC(__after_prom_start)
+
+/*
+ * We need to run with __start at physical address 0.
+ * This will leave some code in the first 256B of
+ * real memory, which are reserved for software use.
+ * The remainder of the first page is loaded with the fixed
+ * interrupt vectors.  The next two pages are filled with
+ * unknown exception placeholders.
+ *
+ * Note: This process overwrites the OF exception vectors.
+ *     r26 == relocation offset
+ *     r27 == KERNELBASE
+ */
+       bl      .reloc_offset
+       mr      r26,r3
+       SET_REG_TO_CONST(r27,KERNELBASE)
+
+       li      r3,0                    /* target addr */
+
+       // XXX FIXME: Use phys returned by OF (r30)
+       add     r4,r27,r26              /* source addr                   */
+                                       /* current address of _start     */
+                                       /*   i.e. where we are running   */
+                                       /*      the source addr          */
+
+       LOADADDR(r5,copy_to_here)       /* # bytes of memory to copy     */
+       sub     r5,r5,r27
+
+       li      r6,0x100                /* Start offset, the first 0x100 */
+                                       /* bytes were copied earlier.    */
+
+       bl      .copy_and_flush         /* copy the first n bytes        */
+                                       /* this includes the code being  */
+                                       /* executed here.                */
+
+       LOADADDR(r0, 4f)                /* Jump to the copy of this code */
+       mtctr   r0                      /* that we just made/relocated   */
+       bctr
+
+       /* now running in the copy at physical 0; copy the remainder */
+4:     LOADADDR(r5,klimit)
+       add     r5,r5,r26
+       ld      r5,0(r5)                /* get the value of klimit */
+       sub     r5,r5,r27
+       bl      .copy_and_flush         /* copy the rest */
+       b       .start_here_multiplatform
+
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+/*
+ * Copy routine used to copy the kernel to start at physical address 0
+ * and flush and invalidate the caches as needed.
+ * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
+ * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
+ *
+ * Note: this routine *only* clobbers r0, r6 and lr
+ */
+_GLOBAL(copy_and_flush)
+       addi    r5,r5,-8
+       addi    r6,r6,-8
+4:     li      r0,16                   /* Use the least common         */
+                                       /* denominator cache line       */
+                                       /* size.  This results in       */
+                                       /* extra cache line flushes     */
+                                       /* but operation is correct.    */
+                                       /* Can't get cache line size    */
+                                       /* from NACA as it is being     */
+                                       /* moved too.                   */
+
+       mtctr   r0                      /* put # words/line in ctr      */
+3:     addi    r6,r6,8                 /* copy a cache line            */
+       ldx     r0,r6,r4
+       stdx    r0,r6,r3
+       bdnz    3b
+       dcbst   r6,r3                   /* write it to memory           */
+       sync
+       icbi    r6,r3                   /* flush the icache line        */
+       cmpld   0,r6,r5
+       blt     4b
+       sync                            /* order all icbi before return */
+       addi    r5,r5,8                 /* undo the -8 bias on entry    */
+       addi    r6,r6,8
+       blr
+
+.align 8
+copy_to_here:
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC_PMAC
+/*
+ * On PowerMac, secondary processors start from the reset vector, which
+ * is temporarily turned into a call to one of the functions below.
+ */
+       .section ".text";
+       .align 2 ;
+
+       .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+       li      r24,0
+       b       1f
+       li      r24,1
+       b       1f
+       li      r24,2
+       b       1f
+       li      r24,3
+1:
+       
+_GLOBAL(pmac_secondary_start)
+       /* turn on 64-bit mode */
+       bl      .enable_64b_mode
+       isync
+
+       /* Copy some CPU settings from CPU 0 */
+       bl      .__restore_cpu_setup
+
+       /* pSeries do that early though I don't think we really need it */
+       mfmsr   r3
+       ori     r3,r3,MSR_RI
+       mtmsrd  r3                      /* RI on */
+
+       /* Set up a paca value for this processor. */
+       LOADADDR(r4, paca)               /* Get base vaddr of paca array        */
+       mulli   r13,r24,PACA_SIZE        /* Calculate vaddr of right paca */
+       add     r13,r13,r4              /* for this processor.          */
+       mtspr   SPRN_SPRG3,r13           /* Save vaddr of paca in SPRG3 */
+
+       /* Create a temp kernel stack for use before relocation is on.  */
+       ld      r1,PACAEMERGSP(r13)
+       subi    r1,r1,STACK_FRAME_OVERHEAD
+
+       b       .__secondary_start
+
+#endif /* CONFIG_PPC_PMAC */
+
+/*
+ * This function is called after the master CPU has released the
+ * secondary processors.  The execution environment is relocation off.
+ * The paca for this processor has the following fields initialized at
+ * this point:
+ *   1. Processor number
+ *   2. Segment table pointer (virtual address)
+ * On entry the following are set:
+ *   r1        = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
+ *   r24   = cpu# (in Linux terms)
+ *   r13   = paca virtual address
+ *   SPRG3 = paca virtual address
+ */
+_GLOBAL(__secondary_start)
+
+       HMT_MEDIUM                      /* Set thread priority to MEDIUM */
+
+       ld      r2,PACATOC(r13)
+       li      r6,0
+       stb     r6,PACAPROCENABLED(r13) /* interrupts soft-disabled for now */
+
+#ifndef CONFIG_PPC_ISERIES
+       /* Initialize the page table pointer register. */
+       LOADADDR(r6,_SDR1)
+       ld      r6,0(r6)                /* get the value of _SDR1        */
+       mtspr   SPRN_SDR1,r6                    /* set the htab location         */
+#endif
+       /* Initialize the first segment table (or SLB) entry             */
+       ld      r3,PACASTABVIRT(r13)    /* get addr of segment table     */
+       bl      .stab_initialize
+
+       /* Initialize the kernel stack.  Just a repeat for iSeries.      */
+       LOADADDR(r3,current_set)
+       sldi    r28,r24,3               /* get current_set[cpu#]         */
+       ldx     r1,r3,r28
+       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       std     r1,PACAKSAVE(r13)
+
+       ld      r3,PACASTABREAL(r13)    /* get raddr of segment table    */
+       ori     r4,r3,1                 /* turn on valid bit             */
+
+#ifdef CONFIG_PPC_ISERIES
+       li      r0,-1                   /* hypervisor call */
+       li      r3,1
+       sldi    r3,r3,63                /* 0x8000000000000000 */
+       ori     r3,r3,4                 /* 0x8000000000000004 */
+       sc                              /* HvCall_setASR */
+#else
+       /* set the ASR */
+       ld      r3,systemcfg@got(r2)    /* r3 = ptr to systemcfg         */
+       ld      r3,0(r3)
+       lwz     r3,PLATFORM(r3)         /* r3 = platform flags           */
+       andi.   r3,r3,PLATFORM_LPAR     /* Test if bit 0 is set (LPAR bit) */
+       beq     98f                     /* branch if result is 0  */
+       /* on RPA-hypervisor Star CPUs the ASR must be set via hcall */
+       mfspr   r3,SPRN_PVR
+       srwi    r3,r3,16
+       cmpwi   r3,0x37                 /* SStar  */
+       beq     97f
+       cmpwi   r3,0x36                 /* IStar  */
+       beq     97f
+       cmpwi   r3,0x34                 /* Pulsar */
+       bne     98f
+97:    li      r3,H_SET_ASR            /* hcall = H_SET_ASR */
+       HVSC                            /* Invoking hcall */
+       b       99f
+98:                                    /* !(rpa hypervisor) || !(star)  */
+       mtasr   r4                      /* set the stab location         */
+99:
+#endif
+       li      r7,0
+       mtlr    r7                      /* zero LR so backtraces stop here */
+
+       /* enable MMU and jump to start_secondary */
+       LOADADDR(r3,.start_secondary_prolog)
+       SET_REG_TO_CONST(r4, MSR_KERNEL)
+#ifdef DO_SOFT_DISABLE
+       ori     r4,r4,MSR_EE            /* hard-enable interrupts */
+#endif
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       rfid
+       b       .       /* prevent speculative execution */
+
+/* 
+ * Running with relocation on at this point.  All we want to do is
+ * zero the stack back-chain pointer before going into C code.
+ */
+_GLOBAL(start_secondary_prolog)
+       li      r3,0
+       std     r3,0(r1)                /* Zero the stack frame pointer */
+       bl      .start_secondary        /* does not return */
+#endif
+
+/*
+ * This subroutine clobbers r11 and r12
+ */
+_GLOBAL(enable_64b_mode)
+       mfmsr   r11                     /* grab the current MSR */
+       /* set MSR_SF (64-bit mode) */
+       li      r12,1
+       rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
+       or      r11,r11,r12
+       /* set MSR_ISF (64-bit interrupt mode) */
+       li      r12,1
+       rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
+       or      r11,r11,r12
+       mtmsrd  r11
+       isync                           /* context-synchronize the MSR change */
+       blr
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+/*
+ * This is where the main kernel code starts.
+ */
+_STATIC(start_here_multiplatform)
+       /* get a new offset, now that the kernel has moved. */
+       bl      .reloc_offset
+       mr      r26,r3
+
+       /* Clear out the BSS. It may have been done in prom_init,
+        * already but that's irrelevant since prom_init will soon
+        * be detached from the kernel completely. Besides, we need
+        * to clear it now for kexec-style entry.
+        */
+       LOADADDR(r11,__bss_stop)
+       LOADADDR(r8,__bss_start)
+       sub     r11,r11,r8              /* bss size                     */
+       addi    r11,r11,7               /* round up to an even double word */
+       rldicl. r11,r11,61,3            /* shift right by 3             */
+       beq     4f                      /* empty bss: skip the loop     */
+       addi    r8,r8,-8
+       li      r0,0
+       mtctr   r11                     /* zero this many doublewords   */
+3:     stdu    r0,8(r8)
+       bdnz    3b
+4:
+
+       mfmsr   r6
+       ori     r6,r6,MSR_RI
+       mtmsrd  r6                      /* RI on */
+
+#ifdef CONFIG_HMT
+       /* Start up the second thread on cpu 0 */
+       mfspr   r3,SPRN_PVR
+       srwi    r3,r3,16
+       cmpwi   r3,0x34                 /* Pulsar  */
+       beq     90f
+       cmpwi   r3,0x36                 /* Icestar */
+       beq     90f
+       cmpwi   r3,0x37                 /* SStar   */
+       beq     90f
+       b       91f                     /* HMT not supported */
+90:    li      r3,0
+       bl      .hmt_start_secondary
+91:
+#endif
+
+       /* The following gets the stack and TOC set up with the regs */
+       /* pointing to the real addr of the kernel stack.  This is   */
+       /* all done to support the C function call below which sets  */
+       /* up the htab.  This is done because we have relocated the  */
+       /* kernel but are still running in real mode. */
+
+       LOADADDR(r3,init_thread_union)
+       add     r3,r3,r26
+
+       /* set up a stack pointer (physical address) */
+       addi    r1,r3,THREAD_SIZE
+       li      r0,0
+       stdu    r0,-STACK_FRAME_OVERHEAD(r1)
+
+       /* set up the TOC (physical address); TOC base = __toc_start+0x8000 */
+       LOADADDR(r2,__toc_start)
+       addi    r2,r2,0x4000
+       addi    r2,r2,0x4000
+       add     r2,r2,r26
+
+       LOADADDR(r3,cpu_specs)
+       add     r3,r3,r26
+       LOADADDR(r4,cur_cpu_spec)
+       add     r4,r4,r26
+       mr      r5,r26
+       bl      .identify_cpu
+
+       /* Save some low level config HIDs of CPU0 to be copied to
+        * other CPUs later on, or used for suspend/resume
+        */
+       bl      .__save_cpu_setup
+       sync
+
+       /* Setup a valid physical PACA pointer in SPRG3 for early_setup
+        * note that boot_cpuid can always be 0 nowadays since there is
+        * nowhere it can be initialized differently before we reach this
+        * code
+        */
+       LOADADDR(r27, boot_cpuid)
+       add     r27,r27,r26
+       lwz     r27,0(r27)
+
+       LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
+       mulli   r13,r27,PACA_SIZE       /* Calculate vaddr of right paca */
+       add     r13,r13,r24             /* for this processor.           */
+       add     r13,r13,r26             /* convert to physical addr      */
+       mtspr   SPRN_SPRG3,r13          /* PPPBBB: Temp... -Peter */
+       
+       /* Do very early kernel initializations, including initial hash table,
+        * stab and slb setup before we turn on relocation.     */
+
+       /* Restore parameters passed from prom_init/kexec */
+       mr      r3,r31
+       bl      .early_setup
+
+       /* set the ASR (via hcall on RPA-hypervisor Star CPUs, else mtasr) */
+       ld      r3,PACASTABREAL(r13)
+       ori     r4,r3,1                 /* turn on valid bit             */
+       ld      r3,systemcfg@got(r2)    /* r3 = ptr to systemcfg */
+       ld      r3,0(r3)
+       lwz     r3,PLATFORM(r3)         /* r3 = platform flags */
+       andi.   r3,r3,PLATFORM_LPAR     /* Test if bit 0 is set (LPAR bit) */
+       beq     98f                     /* branch if result is 0  */
+       mfspr   r3,SPRN_PVR
+       srwi    r3,r3,16
+       cmpwi   r3,0x37                 /* SStar */
+       beq     97f
+       cmpwi   r3,0x36                 /* IStar  */
+       beq     97f
+       cmpwi   r3,0x34                 /* Pulsar */
+       bne     98f
+97:    li      r3,H_SET_ASR            /* hcall = H_SET_ASR */
+       HVSC                            /* Invoking hcall */
+       b       99f
+98:                                    /* !(rpa hypervisor) || !(star) */
+       mtasr   r4                      /* set the stab location        */
+99:
+       /* Set SDR1 (hash table pointer) -- LPAR has no direct SDR1 access */
+       ld      r3,systemcfg@got(r2)    /* r3 = ptr to systemcfg */
+       ld      r3,0(r3)
+       lwz     r3,PLATFORM(r3)         /* r3 = platform flags */
+       /* Test if bit 0 is set (LPAR bit) */
+       andi.   r3,r3,PLATFORM_LPAR
+       bne     98f                     /* branch if result is !0  */
+       LOADADDR(r6,_SDR1)              /* Only if NOT LPAR */
+       add     r6,r6,r26
+       ld      r6,0(r6)                /* get the value of _SDR1 */
+       mtspr   SPRN_SDR1,r6                    /* set the htab location  */
+98: 
+       /* turn on relocation and jump to the common startup code */
+       LOADADDR(r3,.start_here_common)
+       SET_REG_TO_CONST(r4, MSR_KERNEL)
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       rfid
+       b       .       /* prevent speculative execution */
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+       
+       /* This is where all platforms converge execution */
+_STATIC(start_here_common)
+       /* relocation is on at this point */
+
+       /* The following code sets up the SP and TOC now that we are */
+       /* running with translation enabled. */
+
+       LOADADDR(r3,init_thread_union)
+
+       /* set up the stack */
+       addi    r1,r3,THREAD_SIZE
+       li      r0,0
+       stdu    r0,-STACK_FRAME_OVERHEAD(r1)    /* zero back chain */
+
+       /* Apply the CPUs-specific fixups (nop out sections not relevant
+        * to this CPU
+        */
+       li      r3,0
+       bl      .do_cpu_ftr_fixups
+
+       LOADADDR(r26, boot_cpuid)
+       lwz     r26,0(r26)
+
+       LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
+       mulli   r13,r26,PACA_SIZE       /* Calculate vaddr of right paca */
+       add     r13,r13,r24             /* for this processor.           */
+       mtspr   SPRN_SPRG3,r13          /* paca vaddr now that MMU is on */
+
+       /* ptr to current */
+       LOADADDR(r4,init_task)
+       std     r4,PACACURRENT(r13)
+
+       /* Load the TOC */
+       ld      r2,PACATOC(r13)
+       std     r1,PACAKSAVE(r13)
+
+       bl      .setup_system
+
+       /* Load up the kernel context */
+5:
+#ifdef DO_SOFT_DISABLE
+       li      r5,0
+       stb     r5,PACAPROCENABLED(r13) /* Soft Disabled */
+       mfmsr   r5
+       ori     r5,r5,MSR_EE            /* Hard Enabled */
+       mtmsrd  r5
+#endif
+
+       bl .start_kernel                /* does not return */
+
+_GLOBAL(hmt_init)
+#ifdef CONFIG_HMT
+       LOADADDR(r5, hmt_thread_data)
+       mfspr   r7,SPRN_PVR
+       srwi    r7,r7,16
+       cmpwi   r7,0x34                 /* Pulsar  */
+       beq     90f
+       cmpwi   r7,0x36                 /* Icestar */
+       beq     91f
+       cmpwi   r7,0x37                 /* SStar   */
+       beq     91f
+       b       101f                    /* HMT not supported on this CPU */
+90:    mfspr   r6,SPRN_PIR
+       andi.   r6,r6,0x1f
+       b       92f
+91:    mfspr   r6,SPRN_PIR
+       andi.   r6,r6,0x3ff
+92:    sldi    r4,r24,3
+       stwx    r6,r5,r4                /* record our PIR for the 2nd thread */
+       bl      .hmt_start_secondary
+       b       101f
+
+__hmt_secondary_hold:
+       LOADADDR(r5, hmt_thread_data)
+       clrldi  r5,r5,4                 /* convert to a real address */
+       li      r7,0
+       mfspr   r6,SPRN_PIR
+       mfspr   r8,SPRN_PVR
+       srwi    r8,r8,16
+       cmpwi   r8,0x34
+       bne     93f
+       andi.   r6,r6,0x1f
+       b       103f
+       /* NOTE(review): mask here is 0x3f but hmt_init uses 0x3ff for the
+        * Icestar/SStar PIR -- looks inconsistent; confirm intended width */
+93:    andi.   r6,r6,0x3f
+
+       /* scan hmt_thread_data for the entry matching our PIR */
+103:   lwzx    r8,r5,r7
+       cmpw    r8,r6
+       beq     104f
+       addi    r7,r7,8
+       b       103b
+
+104:   addi    r7,r7,4
+       lwzx    r9,r5,r7                /* pick up our logical cpu number */
+       mr      r24,r9
+101:
+#endif
+       mr      r3,r24
+       b       .pSeries_secondary_smp_init
+
+#ifdef CONFIG_HMT
+_GLOBAL(hmt_start_secondary)
+       LOADADDR(r4,__hmt_secondary_hold)
+       clrldi  r4,r4,4                 /* real address of the hold loop */
+       mtspr   SPRN_NIADORM, r4
+       mfspr   r4, SPRN_MSRDORM
+       li      r5, -65
+       and     r4, r4, r5
+       mtspr   SPRN_MSRDORM, r4
+       lis     r4,0xffef
+       ori     r4,r4,0x7403
+       mtspr   SPRN_TSC, r4
+       li      r4,0x1f4
+       mtspr   SPRN_TST, r4
+       /* set HID0 bit 0x1 -- presumably enables the second thread; confirm
+        * against the Star-series CPU documentation */
+       mfspr   r4, SPRN_HID0
+       ori     r4, r4, 0x1
+       mtspr   SPRN_HID0, r4
+       mfspr   r4, SPRN_CTRLF
+       oris    r4, r4, 0x40
+       mtspr   SPRN_CTRLT, r4
+       blr
+#endif
+
+#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
+_GLOBAL(smp_release_cpus)
+       /* All secondary cpus are spinning on a common
+        * spinloop, release them all now so they can start
+        * to spin on their individual paca spinloops.
+        * For non SMP kernels, the secondary cpus never
+        * get out of the common spinloop.
+        * XXX This does nothing useful on iSeries, secondaries are
+        * already waiting on their paca.
+        */
+       li      r3,1
+       LOADADDR(r5,__secondary_hold_spinloop)
+       std     r3,0(r5)        /* non-zero value releases the spinners */
+       sync                    /* make the store visible before return */
+       blr
+#endif /* CONFIG_SMP */
+
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the bss, which is page-aligned.
+ */
+       .section ".bss"
+
+       .align  PAGE_SHIFT
+
+       /* one zero-filled page, e.g. for mapping read-only zero data */
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  PAGE_SIZE
+
+       /* initial top-level page table for the kernel */
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  PAGE_SIZE
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * Used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+       .globl  cmd_line
+cmd_line:
+       .space  COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
new file mode 100644 (file)
index 0000000..de09787
--- /dev/null
@@ -0,0 +1,860 @@
+/*
+ *  arch/ppc/kernel/except_8xx.S
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications by Dan Malek
+ *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ *  This file contains low-level support and setup for PowerPC 8xx
+ *  embedded processors, including trap and interrupt dispatch.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* Macro to make the code more readable. */
+#ifdef CONFIG_8xx_CPU6
+/*
+ * CPU6 errata workaround: before certain mtspr operations a magic
+ * value must be stored to and read back from a scratch word at
+ * physical 12(r0).  `reg' is clobbered as the scratch register.
+ */
+#define DO_8xx_CPU6(val, reg)  \
+       li      reg, val;       \
+       stw     reg, 12(r0);    \
+       lwz     reg, 12(r0);
+#else
+#define DO_8xx_CPU6(val, reg)
+#endif
+       .text
+       .globl  _stext
+_stext:                                /* start of kernel text */
+       .text
+       .globl  _start
+_start:                                /* image entry point (same address) */
+
+/* MPC8xx
+ * This port was done on an MBX board with an 860.  Right now I only
+ * support an ELF compressed (zImage) boot from EPPC-Bug because the
+ * code there loads up some registers before calling us:
+ *   r3: ptr to board info data
+ *   r4: initrd_start or if no initrd then 0
+ *   r5: initrd_end - unused if r4 is 0
+ *   r6: Start of command line string
+ *   r7: End of command line string
+ *
+ * I decided to use conditional compilation instead of checking PVR and
+ * adding more processor specific branches around code I don't need.
+ * Since this is an embedded processor, I also appreciate any memory
+ * savings I can get.
+ *
+ * The MPC8xx does not have any BATs, but it supports large page sizes.
+ * We first initialize the MMU to support 8M byte pages, then load one
+ * entry into each of the instruction and data TLBs to map the first
+ * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
+ * the "internal" processor registers before MMU_init is called.
+ *
+ * The TLB code currently contains a major hack.  Since I use the condition
+ * code register, I have to save and restore it.  I am out of registers, so
+ * I just store it in memory location 0 (the TLB handlers are not reentrant).
+ * To avoid making any decisions, I need to use the "segment" valid bit
+ * in the first level table, but that would require many changes to the
+ * Linux page directory/table functions that I don't want to do right now.
+ *
+ * I used to use SPRG2 for a temporary register in the TLB handler, but it
+ * has since been put to other uses.  I now use a hack to save a register
+ * and the CCR at memory location 0.....Someday I'll fix this.....
+ *     -- Dan
+ */
+       .globl  __start
+__start:
+       mr      r31,r3                  /* save parameters */
+       mr      r30,r4                  /* r4: initrd_start (or 0) */
+       mr      r29,r5                  /* r5: initrd_end */
+       mr      r28,r6                  /* r6: start of command line */
+       mr      r27,r7                  /* r7: end of command line */
+
+       /* We have to turn on the MMU right away so we get cache modes
+        * set correctly.
+        */
+       bl      initial_mmu
+
+/* We now have the lower 8 Meg mapped into TLB entries, and the caches
+ * ready to work.
+ */
+
+turn_on_mmu:
+       mfmsr   r0
+       ori     r0,r0,MSR_DR|MSR_IR     /* enable data/instr translation */
+       mtspr   SPRN_SRR1,r0            /* MSR to be loaded by rfi */
+       lis     r0,start_here@h
+       ori     r0,r0,start_here@l
+       mtspr   SPRN_SRR0,r0            /* destination of the rfi */
+       SYNC
+       rfi                             /* enables MMU */
+
+/*
+ * Exception entry code.  This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+#define EXCEPTION_PROLOG       \
+       mtspr   SPRN_SPRG0,r10; \
+       mtspr   SPRN_SPRG1,r11; \
+       mfcr    r10;            \
+       EXCEPTION_PROLOG_1;     \
+       EXCEPTION_PROLOG_2
+
+/*
+ * Step 1: compute the physical exception-frame pointer in r11 —
+ * below the current r1 if we came from the kernel, or at the top of
+ * the task's kernel stack (found via SPRG3) if we came from user mode.
+ */
+#define EXCEPTION_PROLOG_1     \
+       mfspr   r11,SPRN_SRR1;          /* check whether user or kernel */ \
+       andi.   r11,r11,MSR_PR; \
+       tophys(r11,r1);                 /* use tophys(r1) if kernel */ \
+       beq     1f;             \
+       mfspr   r11,SPRN_SPRG3; \
+       lwz     r11,THREAD_INFO-THREAD(r11);    \
+       addi    r11,r11,THREAD_SIZE;    \
+       tophys(r11,r11);        \
+1:     subi    r11,r11,INT_FRAME_SIZE  /* alloc exc. frame */
+
+
+/*
+ * Step 2: save volatile registers into the frame, point r1 at the
+ * (virtual) kernel stack, and re-enable the kernel MSR bits (with
+ * translation still off).  Leaves SRR0 in r12 and SRR1 in r9.
+ */
+#define EXCEPTION_PROLOG_2     \
+       CLR_TOP32(r11);         \
+       stw     r10,_CCR(r11);          /* save registers */ \
+       stw     r12,GPR12(r11); \
+       stw     r9,GPR9(r11);   \
+       mfspr   r10,SPRN_SPRG0; \
+       stw     r10,GPR10(r11); \
+       mfspr   r12,SPRN_SPRG1; \
+       stw     r12,GPR11(r11); \
+       mflr    r10;            \
+       stw     r10,_LINK(r11); \
+       mfspr   r12,SPRN_SRR0;  \
+       mfspr   r9,SPRN_SRR1;   \
+       stw     r1,GPR1(r11);   \
+       stw     r1,0(r11);      \
+       tovirt(r1,r11);                 /* set new kernel sp */ \
+       li      r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
+       MTMSRD(r10);                    /* (except for mach check in rtas) */ \
+       stw     r0,GPR0(r11);   \
+       SAVE_4GPRS(3, r11);     \
+       SAVE_2GPRS(7, r11)
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r11, r12 (SRR0), and r9 (SRR1).
+ *
+ * Note2: once we have set r1 we are in a position to take exceptions
+ * again, and we could thus set MSR:RI at that point.
+ */
+
+/*
+ * Exception vectors.
+ */
+#define EXCEPTION(n, label, hdlr, xfer)                \
+       . = n;                                  \
+label:                                         \
+       EXCEPTION_PROLOG;                       \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;     \
+       xfer(n, hdlr)
+
+/*
+ * Record the trap number in the frame, build the MSR for the handler
+ * (optionally copying MSR_EE from the saved SRR1 in r9), and branch to
+ * the transfer routine.  The handler and return addresses are placed
+ * inline right after the bl, where the transfer routine picks them up.
+ */
+#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)    \
+       li      r10,trap;                                       \
+       stw     r10,TRAP(r11);                                  \
+       li      r10,MSR_KERNEL;                                 \
+       copyee(r10, r9);                                        \
+       bl      tfer;                                           \
+i##n:                                                          \
+       .long   hdlr;                                           \
+       .long   ret
+
+/* COPY_EE copies the MSR_EE bit (bit 16) from s into d. */
+#define COPY_EE(d, s)          rlwimi d,s,0,16,16
+#define NOCOPY(d, s)
+
+#define EXC_XFER_STD(n, hdlr)          \
+       EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
+                         ret_from_except_full)
+
+/* "LITE" variants store trap number n+1 and use the light-weight
+ * transfer/return paths. */
+#define EXC_XFER_LITE(n, hdlr)         \
+       EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
+                         ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)           \
+       EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
+                         ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)      \
+       EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
+                         ret_from_except)
+
+/* System reset */
+       EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+
+/* Machine check */
+       . = 0x200
+MachineCheck:
+       EXCEPTION_PROLOG
+       mfspr r4,SPRN_DAR
+       stw r4,_DAR(r11)
+       mfspr r5,SPRN_DSISR
+       stw r5,_DSISR(r11)
+       addi r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_STD(0x200, machine_check_exception)
+
+/* Data access exception.
+ * This is "never generated" by the MPC8xx.  We jump to it for other
+ * translation errors.
+ */
+       . = 0x300
+DataAccess:
+       EXCEPTION_PROLOG
+       mfspr   r10,SPRN_DSISR
+       stw     r10,_DSISR(r11)
+       mr      r5,r10                  /* arg3: DSISR */
+       mfspr   r4,SPRN_DAR             /* arg2: faulting address */
+       EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+/* Instruction access exception.
+ * This is "never generated" by the MPC8xx.  We jump to it for other
+ * translation errors.
+ */
+       . = 0x400
+InstructionAccess:
+       EXCEPTION_PROLOG
+       mr      r4,r12                  /* arg2: SRR0 (faulting PC) */
+       mr      r5,r9                   /* arg3: SRR1 */
+       EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+/* External interrupt */
+       EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* Alignment exception */
+       . = 0x600
+Alignment:
+       EXCEPTION_PROLOG
+       mfspr   r4,SPRN_DAR
+       stw     r4,_DAR(r11)
+       mfspr   r5,SPRN_DSISR
+       stw     r5,_DSISR(r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_EE(0x600, alignment_exception)
+
+/* Program check exception */
+       EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
+
+/* No FPU on MPC8xx.  This exception is not supposed to happen.
+*/
+       EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
+
+/* Decrementer */
+       EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+
+       EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
+
+/* System call */
+       . = 0xc00
+SystemCall:
+       EXCEPTION_PROLOG
+       EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+/* Single step - not used on 601 */
+       EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+       EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
+
+/* On the MPC8xx, this is a software emulation interrupt.  It occurs
+ * for all unimplemented and illegal instructions.
+ */
+       EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
+
+       . = 0x1100
+/*
+ * For the MPC8xx, this is a software tablewalk to load the instruction
+ * TLB.  It is modelled after the example in the Motorola manual.  The task
+ * switch loads the M_TWB register with the pointer to the first level table.
+ * If we discover there is no second level table (value is zero) or if there
+ * is an invalid pte, we load that into the TLB, which causes another fault
+ * into the TLB Error interrupt where we can handle such problems.
+ * We have to use the MD_xxx registers for the tablewalk because the
+ * equivalent MI_xxx registers only perform the attribute functions.
+ */
+InstructionTLBMiss:
+#ifdef CONFIG_8xx_CPU6
+       stw     r3, 8(r0)       /* CPU6: r3 is a scratch reg, save it */
+#endif
+       DO_8xx_CPU6(0x3f80, r3)
+       mtspr   SPRN_M_TW, r10  /* Save a couple of working registers */
+       mfcr    r10
+       stw     r10, 0(r0)      /* CR saved at phys 0: handler is not reentrant */
+       stw     r11, 4(r0)
+       mfspr   r10, SPRN_SRR0  /* Get effective address of fault */
+       DO_8xx_CPU6(0x3780, r3)
+       mtspr   SPRN_MD_EPN, r10        /* Have to use MD_EPN for walk, MI_EPN can't */
+       mfspr   r10, SPRN_M_TWB /* Get level 1 table entry address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       andi.   r11, r10, 0x0800        /* Address >= 0x80000000 */
+       beq     3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+       rlwimi  r10, r11, 0, 2, 19      /* substitute the kernel pgdir base */
+3:
+       lwz     r11, 0(r10)     /* Get the level 1 entry */
+       rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
+       beq     2f              /* If zero, don't try to find a pte */
+
+       /* We have a pte table, so load the MI_TWC with the attributes
+        * for this "segment."
+        */
+       ori     r11,r11,1               /* Set valid bit */
+       DO_8xx_CPU6(0x2b80, r3)
+       mtspr   SPRN_MI_TWC, r11        /* Set segment attributes */
+       DO_8xx_CPU6(0x3b80, r3)
+       mtspr   SPRN_MD_TWC, r11        /* Load pte table base address */
+       mfspr   r11, SPRN_MD_TWC        /* ....and get the pte address */
+       lwz     r10, 0(r11)     /* Get the pte */
+
+       ori     r10, r10, _PAGE_ACCESSED        /* mark referenced in the PTE */
+       stw     r10, 0(r11)
+
+       /* The Linux PTE won't go exactly into the MMU TLB.
+        * Software indicator bits 21, 22 and 28 must be clear.
+        * Software indicator bits 24, 25, 26, and 27 must be
+        * set.  All other Linux PTE bits control the behavior
+        * of the MMU.
+        */
+2:     li      r11, 0x00f0
+       rlwimi  r10, r11, 0, 24, 28     /* Set 24-27, clear 28 */
+       DO_8xx_CPU6(0x2d80, r3)
+       mtspr   SPRN_MI_RPN, r10        /* Update TLB entry */
+
+       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       lwz     r11, 0(r0)
+       mtcr    r11
+       lwz     r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+       lwz     r3, 8(r0)
+#endif
+       rfi
+
+       . = 0x1200
+/* Software tablewalk to reload the data TLB; same structure as the
+ * instruction TLB miss handler above.
+ */
+DataStoreTLBMiss:
+#ifdef CONFIG_8xx_CPU6
+       stw     r3, 8(r0)       /* CPU6: r3 is a scratch reg, save it */
+#endif
+       DO_8xx_CPU6(0x3f80, r3)
+       mtspr   SPRN_M_TW, r10  /* Save a couple of working registers */
+       mfcr    r10
+       stw     r10, 0(r0)      /* CR saved at phys 0: handler is not reentrant */
+       stw     r11, 4(r0)
+       mfspr   r10, SPRN_M_TWB /* Get level 1 table entry address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       andi.   r11, r10, 0x0800
+       beq     3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+       rlwimi  r10, r11, 0, 2, 19      /* substitute the kernel pgdir base */
+3:
+       lwz     r11, 0(r10)     /* Get the level 1 entry */
+       rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
+       beq     2f              /* If zero, don't try to find a pte */
+
+       /* We have a pte table, so load fetch the pte from the table.
+        */
+       ori     r11, r11, 1     /* Set valid bit in physical L2 page */
+       DO_8xx_CPU6(0x3b80, r3)
+       mtspr   SPRN_MD_TWC, r11        /* Load pte table base address */
+       mfspr   r10, SPRN_MD_TWC        /* ....and get the pte address */
+       lwz     r10, 0(r10)     /* Get the pte */
+
+       /* Insert the Guarded flag into the TWC from the Linux PTE.
+        * It is bit 27 of both the Linux PTE and the TWC (at least
+        * I got that right :-).  It will be better when we can put
+        * this into the Linux pgd/pmd and load it in the operation
+        * above.
+        */
+       rlwimi  r11, r10, 0, 27, 27
+       DO_8xx_CPU6(0x3b80, r3)
+       mtspr   SPRN_MD_TWC, r11
+
+       mfspr   r11, SPRN_MD_TWC        /* get the pte address again */
+       ori     r10, r10, _PAGE_ACCESSED        /* mark referenced in the PTE */
+       stw     r10, 0(r11)
+
+       /* The Linux PTE won't go exactly into the MMU TLB.
+        * Software indicator bits 21, 22 and 28 must be clear.
+        * Software indicator bits 24, 25, 26, and 27 must be
+        * set.  All other Linux PTE bits control the behavior
+        * of the MMU.
+        */
+2:     li      r11, 0x00f0
+       rlwimi  r10, r11, 0, 24, 28     /* Set 24-27, clear 28 */
+       DO_8xx_CPU6(0x3d80, r3)
+       mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
+
+       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       lwz     r11, 0(r0)
+       mtcr    r11
+       lwz     r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+       lwz     r3, 8(r0)
+#endif
+       rfi
+
+/* This is an instruction TLB error on the MPC8xx.  This could be due
+ * to many reasons, such as executing guarded memory or illegal instruction
+ * addresses.  There is nothing to do but handle a big time error fault.
+ */
+       . = 0x1300
+InstructionTLBError:
+       b       InstructionAccess       /* treat like an instruction access fault */
+
+/* This is the data TLB error on the MPC8xx.  This could be due to
+ * many reasons, including a dirty update to a pte.  We can catch that
+ * one here, but anything else is an error.  First, we track down the
+ * Linux pte.  If it is valid, write access is allowed, but the
+ * page dirty bit is not set, we will set it and reload the TLB.  For
+ * any other case, we bail out to a higher level function that can
+ * handle it.
+ */
+       . = 0x1400
+DataTLBError:
+#ifdef CONFIG_8xx_CPU6
+       stw     r3, 8(r0)       /* CPU6: r3 is a scratch reg, save it */
+#endif
+       DO_8xx_CPU6(0x3f80, r3)
+       mtspr   SPRN_M_TW, r10  /* Save a couple of working registers */
+       mfcr    r10
+       stw     r10, 0(r0)      /* CR saved at phys 0: handler is not reentrant */
+       stw     r11, 4(r0)
+
+       /* First, make sure this was a store operation.
+       */
+       mfspr   r10, SPRN_DSISR
+       andis.  r11, r10, 0x0200        /* If set, indicates store op */
+       beq     2f              /* not a store: punt to DataAccess */
+
+       /* The EA of a data TLB miss is automatically stored in the MD_EPN
+        * register.  The EA of a data TLB error is automatically stored in
+        * the DAR, but not the MD_EPN register.  We must copy the 20 most
+        * significant bits of the EA from the DAR to MD_EPN before we
+        * start walking the page tables.  We also need to copy the CASID
+        * value from the M_CASID register.
+        * Addendum:  The EA of a data TLB error is _supposed_ to be stored
+        * in DAR, but it seems that this doesn't happen in some cases, such
+        * as when the error is due to a dcbi instruction to a page with a
+        * TLB that doesn't have the changed bit set.  In such cases, there
+        * does not appear to be any way  to recover the EA of the error
+        * since it is neither in DAR nor MD_EPN.  As a workaround, the
+        * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
+        * are initialized in mapin_ram().  This will avoid the problem,
+        * assuming we only use the dcbi instruction on kernel addresses.
+        */
+       mfspr   r10, SPRN_DAR
+       rlwinm  r11, r10, 0, 0, 19      /* keep the 20 MSBs of the EA */
+       ori     r11, r11, MD_EVALID
+       mfspr   r10, SPRN_M_CASID
+       rlwimi  r11, r10, 0, 28, 31     /* merge in the CASID */
+       DO_8xx_CPU6(0x3780, r3)
+       mtspr   SPRN_MD_EPN, r11
+
+       mfspr   r10, SPRN_M_TWB /* Get level 1 table entry address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       andi.   r11, r10, 0x0800
+       beq     3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+       rlwimi  r10, r11, 0, 2, 19      /* substitute the kernel pgdir base */
+3:
+       lwz     r11, 0(r10)     /* Get the level 1 entry */
+       rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
+       beq     2f              /* If zero, bail */
+
+       /* We have a pte table, so fetch the pte from the table.
+        */
+       ori     r11, r11, 1             /* Set valid bit in physical L2 page */
+       DO_8xx_CPU6(0x3b80, r3)
+       mtspr   SPRN_MD_TWC, r11                /* Load pte table base address */
+       mfspr   r11, SPRN_MD_TWC                /* ....and get the pte address */
+       lwz     r10, 0(r11)             /* Get the pte */
+
+       andi.   r11, r10, _PAGE_RW      /* Is it writeable? */
+       beq     2f                      /* Bail out if not */
+
+       /* Update 'changed', among others.
+       */
+       ori     r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+       mfspr   r11, SPRN_MD_TWC                /* Get pte address again */
+       stw     r10, 0(r11)             /* and update pte in table */
+
+       /* The Linux PTE won't go exactly into the MMU TLB.
+        * Software indicator bits 21, 22 and 28 must be clear.
+        * Software indicator bits 24, 25, 26, and 27 must be
+        * set.  All other Linux PTE bits control the behavior
+        * of the MMU.
+        */
+       li      r11, 0x00f0
+       rlwimi  r10, r11, 0, 24, 28     /* Set 24-27, clear 28 */
+       DO_8xx_CPU6(0x3d80, r3)
+       mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
+
+       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       lwz     r11, 0(r0)
+       mtcr    r11
+       lwz     r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+       lwz     r3, 8(r0)
+#endif
+       rfi
+2:
+       /* Not a recoverable dirty-bit update: restore state and punt
+        * to the generic data access handler.
+        */
+       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       lwz     r11, 0(r0)
+       mtcr    r11
+       lwz     r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+       lwz     r3, 8(r0)
+#endif
+       b       DataAccess
+
+       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
+
+/* On the MPC8xx, these next four traps are used for development
+ * support of breakpoints and such.  Someday I will get around to
+ * using them.
+ */
+       EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
+
+       . = 0x2000              /* end of the fixed exception vector area */
+
+/* No FPU on the MPC8xx: giving up the FPU state is a no-op. */
+       .globl  giveup_fpu
+giveup_fpu:
+       blr
+
+/*
+ * This is where the main kernel code starts.
+ */
+start_here:
+       /* ptr to current */
+       lis     r2,init_task@h
+       ori     r2,r2,init_task@l
+
+       /* ptr to phys current thread */
+       tophys(r4,r2)
+       addi    r4,r4,THREAD    /* init task's THREAD */
+       mtspr   SPRN_SPRG3,r4
+       li      r3,0
+       mtspr   SPRN_SPRG2,r3   /* 0 => r1 has kernel sp */
+
+       /* stack */
+       lis     r1,init_thread_union@ha
+       addi    r1,r1,init_thread_union@l
+       li      r0,0
+       stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+       bl      early_init      /* We have to do this with MMU on */
+
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ */
+       mr      r3,r31          /* pass back the boot parameters saved in __start */
+       mr      r4,r30
+       mr      r5,r29
+       mr      r6,r28
+       mr      r7,r27
+       bl      machine_init
+       bl      MMU_init
+
+/*
+ * Go back to running unmapped so we can load up new values
+ * and change to using our exception vectors.
+ * On the 8xx, all we have to do is invalidate the TLB to clear
+ * the old 8M byte TLB mappings and load the page table base register.
+ */
+       /* The right way to do this would be to track it down through
+        * init's THREAD like the context switch code does, but this is
+        * easier......until someone changes init's static structures.
+        */
+       lis     r6, swapper_pg_dir@h
+       ori     r6, r6, swapper_pg_dir@l
+       tophys(r6,r6)
+#ifdef CONFIG_8xx_CPU6
+       /* CPU6 errata dance before the M_TWB write (cf. DO_8xx_CPU6) */
+       lis     r4, cpu6_errata_word@h
+       ori     r4, r4, cpu6_errata_word@l
+       li      r3, 0x3980
+       stw     r3, 12(r4)
+       lwz     r3, 12(r4)
+#endif
+       mtspr   SPRN_M_TWB, r6  /* level 1 table base = kernel pgdir */
+       lis     r4,2f@h
+       ori     r4,r4,2f@l
+       tophys(r4,r4)
+       li      r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       rfi                     /* continue at 2: with translation off */
+/* Load up the kernel context */
+2:
+       SYNC                    /* Force all PTE updates to finish */
+       tlbia                   /* Clear all TLB entries */
+       sync                    /* wait for tlbia/tlbie to finish */
+       TLBSYNC                 /* ... on all CPUs */
+
+       /* set up the PTE pointers for the Abatron bdiGDB.
+       */
+       tovirt(r6,r6)
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       stw     r5, 0xf0(r0)    /* Must match your Abatron config file */
+       tophys(r5,r5)
+       stw     r6, 0(r5)
+
+/* Now turn on the MMU for real! */
+       li      r4,MSR_KERNEL
+       lis     r3,start_kernel@h
+       ori     r3,r3,start_kernel@l
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+       rfi                     /* enable MMU and jump to start_kernel */
+
+/* Set up the initial MMU state so we can do the first level of
+ * kernel initialization.  This maps the first 8 MBytes of memory 1:1
+ * virtual to physical.  Also, set the cache mode since that is defined
+ * by TLB entries and perform any additional mapping (like of the IMMR).
+ * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
+ * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
+ * these mappings is mapped by page tables.
+ *
+ * Called with the MMU off; clobbers r8-r11.  Returns via blr.
+ */
+initial_mmu:
+       tlbia                   /* Invalidate all TLB entries */
+#ifdef CONFIG_PIN_TLB
+       lis     r8, MI_RSV4I@h
+       ori     r8, r8, 0x1c00
+#else
+       li      r8, 0
+#endif
+       mtspr   SPRN_MI_CTR, r8 /* Set instruction MMU control */
+
+#ifdef CONFIG_PIN_TLB
+       lis     r10, (MD_RSV4I | MD_RESETVAL)@h
+       ori     r10, r10, 0x1c00
+       mr      r8, r10
+#else
+       lis     r10, MD_RESETVAL@h
+#endif
+#ifndef CONFIG_8xx_COPYBACK
+       oris    r10, r10, MD_WTDEF@h
+#endif
+       mtspr   SPRN_MD_CTR, r10        /* Set data TLB control */
+
+       /* Now map the lower 8 Meg into the TLBs.  For this quick hack,
+        * we can load the instruction and data TLB registers with the
+        * same values.
+        */
+       lis     r8, KERNELBASE@h        /* Create vaddr for TLB */
+       ori     r8, r8, MI_EVALID       /* Mark it valid */
+       mtspr   SPRN_MI_EPN, r8
+       mtspr   SPRN_MD_EPN, r8
+       li      r8, MI_PS8MEG           /* Set 8M byte page */
+       ori     r8, r8, MI_SVALID       /* Make it valid */
+       mtspr   SPRN_MI_TWC, r8
+       mtspr   SPRN_MD_TWC, r8
+       li      r8, MI_BOOTINIT         /* Create RPN for address 0 */
+       mtspr   SPRN_MI_RPN, r8         /* Store TLB entry */
+       mtspr   SPRN_MD_RPN, r8
+       lis     r8, MI_Kp@h             /* Set the protection mode */
+       mtspr   SPRN_MI_AP, r8
+       mtspr   SPRN_MD_AP, r8
+
+       /* Map another 8 MByte at the IMMR to get the processor
+        * internal registers (among other things).
+        */
+#ifdef CONFIG_PIN_TLB
+       addi    r10, r10, 0x0100        /* advance to the next pinned entry */
+       mtspr   SPRN_MD_CTR, r10
+#endif
+       mfspr   r9, 638                 /* Get current IMMR (SPR 638) */
+       andis.  r9, r9, 0xff80          /* Get 8Mbyte boundary */
+
+       mr      r8, r9                  /* Create vaddr for TLB */
+       ori     r8, r8, MD_EVALID       /* Mark it valid */
+       mtspr   SPRN_MD_EPN, r8
+       li      r8, MD_PS8MEG           /* Set 8M byte page */
+       ori     r8, r8, MD_SVALID       /* Make it valid */
+       mtspr   SPRN_MD_TWC, r8
+       mr      r8, r9                  /* Create paddr for TLB */
+       ori     r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
+       mtspr   SPRN_MD_RPN, r8
+
+#ifdef CONFIG_PIN_TLB
+       /* Map two more 8M kernel data pages.
+       */
+       addi    r10, r10, 0x0100        /* advance to the next pinned entry */
+       mtspr   SPRN_MD_CTR, r10
+
+       lis     r8, KERNELBASE@h        /* Create vaddr for TLB */
+       addis   r8, r8, 0x0080          /* Add 8M */
+       ori     r8, r8, MI_EVALID       /* Mark it valid */
+       mtspr   SPRN_MD_EPN, r8
+       li      r9, MI_PS8MEG           /* Set 8M byte page */
+       ori     r9, r9, MI_SVALID       /* Make it valid */
+       mtspr   SPRN_MD_TWC, r9
+       li      r11, MI_BOOTINIT        /* Create RPN for address 0 */
+       addis   r11, r11, 0x0080        /* Add 8M */
+       mtspr   SPRN_MD_RPN, r11        /* r11 holds the RPN; r8 is the EPN */
+
+       addis   r8, r8, 0x0080          /* Add 8M */
+       mtspr   SPRN_MD_EPN, r8
+       mtspr   SPRN_MD_TWC, r9
+       addis   r11, r11, 0x0080        /* Add 8M */
+       mtspr   SPRN_MD_RPN, r11        /* r11 holds the RPN; r8 is the EPN */
+#endif
+
+       /* Since the cache is enabled according to the information we
+        * just loaded into the TLB, invalidate and enable the caches here.
+        * We should probably check/set other modes....later.
+        */
+       lis     r8, IDC_INVALL@h
+       mtspr   SPRN_IC_CST, r8
+       mtspr   SPRN_DC_CST, r8
+       lis     r8, IDC_ENABLE@h
+       mtspr   SPRN_IC_CST, r8
+#ifdef CONFIG_8xx_COPYBACK
+       mtspr   SPRN_DC_CST, r8
+#else
+       /* For a debug option, I left this here to easily enable
+        * the write through cache mode
+        */
+       lis     r8, DC_SFWT@h
+       mtspr   SPRN_DC_CST, r8
+       lis     r8, IDC_ENABLE@h
+       mtspr   SPRN_DC_CST, r8
+#endif
+       blr
+
+
+/*
+ * Set up to use a given MMU context.
+ * r3 is context number, r4 is PGD pointer.
+ *
+ * We place the physical address of the new task page directory loaded
+ * into the MMU base register, and set the ASID compare register with
+ * the new "context."
+ */
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Context switch the PTE pointer for the Abatron BDI2000.
+        * The PGDIR is passed as second argument.
+        */
+       lis     r5, KERNELBASE@h
+       lwz     r5, 0xf0(r5)            /* pointer planted by start_here */
+       stw     r4, 0x4(r5)
+#endif
+
+#ifdef CONFIG_8xx_CPU6
+       /* CPU6 errata: each mtspr needs the store/readback dance first. */
+       lis     r6, cpu6_errata_word@h
+       ori     r6, r6, cpu6_errata_word@l
+       tophys  (r4, r4)
+       li      r7, 0x3980
+       stw     r7, 12(r6)
+       lwz     r7, 12(r6)
+        mtspr   SPRN_M_TWB, r4               /* Update MMU base address */
+       li      r7, 0x3380
+       stw     r7, 12(r6)
+       lwz     r7, 12(r6)
+        mtspr   SPRN_M_CASID, r3             /* Update context */
+#else
+        mtspr   SPRN_M_CASID,r3                /* Update context */
+       tophys  (r4, r4)
+       mtspr   SPRN_M_TWB, r4          /* and pgd */
+#endif
+       SYNC
+       blr
+
+#ifdef CONFIG_8xx_CPU6
+/* It's here because it is unique to the 8xx.
+ * It is important we get called with interrupts disabled.  I used to
+ * do that, but it appears that all code that calls this already had
+ * interrupt disabled.
+ */
+/* Write r3 to the decrementer, with the CPU6 errata store/readback
+ * sequence before the mtspr.  Clobbers r4 and r7.
+ */
+       .globl  set_dec_cpu6
+set_dec_cpu6:
+       lis     r7, cpu6_errata_word@h
+       ori     r7, r7, cpu6_errata_word@l
+       li      r4, 0x2c00
+       stw     r4, 8(r7)
+       lwz     r4, 8(r7)
+        mtspr   22, r3         /* Update Decrementer (SPR 22) */
+       SYNC
+       blr
+#endif /* CONFIG_8xx_CPU6 */
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the data segment,
+ * which is page-aligned.
+ */
+       .data
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  4096                    /* one page kept permanently zeroed */
+
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  4096                    /* initial (kernel) page directory */
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * Used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+       .globl  cmd_line
+cmd_line:
+       .space  512
+
+/* Room for two PTE table pointers, usually the kernel and current user
+ * pointer to their respective root page table (pgdir).
+ */
+abatron_pteptrs:
+       .space  8
+
+#ifdef CONFIG_8xx_CPU6
+/* Scratch word used by the CPU6 errata store/readback sequences. */
+       .globl  cpu6_errata_word
+cpu6_errata_word:
+       .space  16
+#endif
+
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
new file mode 100644 (file)
index 0000000..5063c60
--- /dev/null
@@ -0,0 +1,1063 @@
+/*
+ * arch/ppc/kernel/head_fsl_booke.S
+ *
+ * Kernel execution entry point code.
+ *
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *     PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ *     Author: MontaVista Software, Inc.
+ *             frank_rowand@mvista.com or source@mvista.com
+ *             debbie_chu@mvista.com
+ *    Copyright 2002-2004 MontaVista Software, Inc.
+ *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
+ *    Copyright 2004 Freescale Semiconductor, Inc
+ *      PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include "head_booke.h"
+
+/* As with the other PowerPC ports, it is expected that when code
+ * execution begins here, the following registers contain valid, yet
+ * optional, information:
+ *
+ *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+ *   r4 - Starting address of the init RAM disk
+ *   r5 - Ending address of the init RAM disk
+ *   r6 - Start of kernel command line string (e.g. "mem=128")
+ *   r7 - End of kernel command line string
+ *
+ */
+       .text
+_GLOBAL(_stext)
+_GLOBAL(_start)
+       /*
+        * Reserve a word at a fixed location to store the address
+        * of abatron_pteptrs
+        */
+       nop
+/*
+ * Save parameters we are passed (see the register list in the header
+ * comment above) into non-volatile registers so they survive the calls
+ * made during MMU takeover; they are handed to machine_init later.
+ */
+       mr      r31,r3          /* board info structure pointer */
+       mr      r30,r4          /* start of init RAM disk */
+       mr      r29,r5          /* end of init RAM disk */
+       mr      r28,r6          /* start of command line string */
+       mr      r27,r7          /* end of command line string */
+       li      r24,0           /* CPU number */
+
+/* We try to not make any assumptions about how the boot loader
+ * setup or used the TLBs.  We invalidate all mappings from the
+ * boot loader and load a single entry in TLB1[0] to map the
+ * first 16M of kernel memory.  Any boot info passed from the
+ * bootloader needs to live in this first 16M.
+ *
+ * Requirement on bootloader:
+ *  - The page we're executing in needs to reside in TLB1 and
+ *    have IPROT=1.  If not an invalidate broadcast could
+ *    evict the entry we're currently executing in.
+ *
+ *  r3 = Index of TLB1 were executing in
+ *  r4 = Current MSR[IS]
+ *  r5 = Index of TLB1 temp mapping
+ *
+ * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
+ * if needed
+ */
+
+/* 1. Find the index of the entry we're executing in.
+ *    tlbsx searches on the current PC; try each PID register in turn
+ *    until a valid match is found.
+ */
+       bl      invstr                          /* Find our address */
+invstr:        mflr    r6                              /* Make it accessible */
+       mfmsr   r7
+       rlwinm  r4,r7,27,31,31                  /* extract MSR[IS] */
+       mfspr   r7, SPRN_PID0
+       slwi    r7,r7,16
+       or      r7,r7,r4                        /* MAS6 = SPID | SAS */
+       mtspr   SPRN_MAS6,r7
+       tlbsx   0,r6                            /* search MSR[IS], SPID=PID0 */
+#ifndef CONFIG_E200
+       mfspr   r7,SPRN_MAS1
+       andis.  r7,r7,MAS1_VALID@h
+       bne     match_TLB
+       mfspr   r7,SPRN_PID1
+       slwi    r7,r7,16
+       or      r7,r7,r4
+       mtspr   SPRN_MAS6,r7
+       tlbsx   0,r6                            /* search MSR[IS], SPID=PID1 */
+       mfspr   r7,SPRN_MAS1
+       andis.  r7,r7,MAS1_VALID@h
+       bne     match_TLB
+       mfspr   r7, SPRN_PID2
+       slwi    r7,r7,16
+       or      r7,r7,r4
+       mtspr   SPRN_MAS6,r7
+       tlbsx   0,r6                            /* Fall through, we had to match */
+#endif
+match_TLB:
+       mfspr   r7,SPRN_MAS0
+       rlwinm  r3,r7,16,20,31                  /* Extract MAS0(Entry) */
+
+       mfspr   r7,SPRN_MAS1                    /* Ensure IPROT set so a broadcast
+                                                * invalidate can't evict us */
+       oris    r7,r7,MAS1_IPROT@h
+       mtspr   SPRN_MAS1,r7
+       tlbwe
+
+/* 2. Invalidate all entries except the entry we're executing in */
+       mfspr   r9,SPRN_TLB1CFG
+       andi.   r9,r9,0xfff                     /* r9 = TLB1CFG[NENTRY] */
+       li      r6,0                            /* Set Entry counter to 0 */
+1:     lis     r7,0x1000                       /* Set MAS0(TLBSEL) = 1 */
+       rlwimi  r7,r6,16,4,15                   /* Setup MAS0 = TLBSEL | ESEL(r6) */
+       mtspr   SPRN_MAS0,r7
+       tlbre
+       mfspr   r7,SPRN_MAS1
+       rlwinm  r7,r7,0,2,31                    /* Clear MAS1 Valid and IPROT */
+       cmpw    r3,r6
+       beq     skpinv                          /* Dont update the current execution TLB */
+       mtspr   SPRN_MAS1,r7
+       tlbwe
+       isync
+skpinv:        addi    r6,r6,1                         /* Increment */
+       cmpw    r6,r9                           /* Are we done? */
+       bne     1b                              /* If not, repeat */
+
+       /* Invalidate TLB0 (tlbivax EA bit 0x04 selects "invalidate all") */
+       li      r6,0x04
+       tlbivax 0,r6
+#ifdef CONFIG_SMP
+       tlbsync
+#endif
+       /* Invalidate TLB1 (0x08 selects TLB1, with the inv-all bit => 0x0c) */
+       li      r6,0x0c
+       tlbivax 0,r6
+#ifdef CONFIG_SMP
+       tlbsync
+#endif
+       msync
+
+/* 3. Setup a temp mapping and jump to it.
+ *    Pick r5 = an entry index != r3 (toggle bit 0, then +1 keeps it
+ *    non-zero and distinct), copy our current entry into it with the
+ *    opposite address space, and rfi into the copy.
+ */
+       andi.   r5, r3, 0x1     /* Find an entry not used and is non-zero */
+       addi    r5, r5, 0x1
+       lis     r7,0x1000       /* Set MAS0(TLBSEL) = 1 */
+       rlwimi  r7,r3,16,4,15   /* Setup MAS0 = TLBSEL | ESEL(r3) */
+       mtspr   SPRN_MAS0,r7
+       tlbre
+
+       /* Just modify the entry ID and EPN for the temp mapping */
+       lis     r7,0x1000       /* Set MAS0(TLBSEL) = 1 */
+       rlwimi  r7,r5,16,4,15   /* Setup MAS0 = TLBSEL | ESEL(r5) */
+       mtspr   SPRN_MAS0,r7
+       xori    r6,r4,1         /* Setup TMP mapping in the other Address space */
+       slwi    r6,r6,12
+       oris    r6,r6,(MAS1_VALID|MAS1_IPROT)@h
+       ori     r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
+       mtspr   SPRN_MAS1,r6
+       mfspr   r6,SPRN_MAS2
+       li      r7,0            /* temp EPN = 0 */
+       rlwimi  r7,r6,0,20,31
+       mtspr   SPRN_MAS2,r7
+       tlbwe
+
+       xori    r6,r4,1
+       slwi    r6,r6,5         /* setup new context with other address space
+                                * (MSR[IS|DS] in SRR1) */
+       bl      1f              /* Find our address */
+1:     mflr    r9
+       rlwimi  r7,r9,0,20,31
+       addi    r7,r7,24        /* 24 = 6 insns past label 1, i.e. the
+                                * instruction after the rfi below */
+       mtspr   SPRN_SRR0,r7
+       mtspr   SPRN_SRR1,r6
+       rfi                     /* continue below, now in the temp mapping */
+
+/* 4. Clear out PIDs & Search info */
+       li      r6,0
+       mtspr   SPRN_PID0,r6
+#ifndef CONFIG_E200
+       mtspr   SPRN_PID1,r6
+       mtspr   SPRN_PID2,r6
+#endif
+       mtspr   SPRN_MAS6,r6
+
+/* 5. Invalidate mapping we started in */
+       lis     r7,0x1000       /* Set MAS0(TLBSEL) = 1 */
+       rlwimi  r7,r3,16,4,15   /* Setup MAS0 = TLBSEL | ESEL(r3) */
+       mtspr   SPRN_MAS0,r7
+       tlbre
+       li      r6,0
+       mtspr   SPRN_MAS1,r6    /* MAS1 = 0 => entry invalid */
+       tlbwe
+       /* Invalidate TLB1 */
+       li      r9,0x0c
+       tlbivax 0,r9
+#ifdef CONFIG_SMP
+       tlbsync
+#endif
+       msync
+
+/* 6. Setup KERNELBASE mapping in TLB1[0]: a 16M IPROT entry mapping
+ *    KERNELBASE to physical 0 */
+       lis     r6,0x1000               /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
+       mtspr   SPRN_MAS0,r6
+       lis     r6,(MAS1_VALID|MAS1_IPROT)@h
+       ori     r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
+       mtspr   SPRN_MAS1,r6
+       li      r7,0
+       lis     r6,KERNELBASE@h
+       ori     r6,r6,KERNELBASE@l
+       rlwimi  r6,r7,0,20,31
+       mtspr   SPRN_MAS2,r6
+       li      r7,(MAS3_SX|MAS3_SW|MAS3_SR)
+       mtspr   SPRN_MAS3,r7
+       tlbwe
+
+/* 7. Jump to KERNELBASE mapping (same rfi trick as step 3) */
+       lis     r7,MSR_KERNEL@h
+       ori     r7,r7,MSR_KERNEL@l
+       bl      1f                      /* Find our address */
+1:     mflr    r9
+       rlwimi  r6,r9,0,20,31
+       addi    r6,r6,24
+       mtspr   SPRN_SRR0,r6
+       mtspr   SPRN_SRR1,r7
+       rfi                             /* start execution out of TLB1[0] entry */
+
+/* 8. Clear out the temp mapping */
+       lis     r7,0x1000       /* Set MAS0(TLBSEL) = 1 */
+       rlwimi  r7,r5,16,4,15   /* Setup MAS0 = TLBSEL | ESEL(r5) */
+       mtspr   SPRN_MAS0,r7
+       tlbre
+       mtspr   SPRN_MAS1,r8    /* NOTE(review): r8 is never set in this file;
+                                * writing it to MAS1 looks like it relies on a
+                                * stale/zero value to clear MAS1_VALID.  Step 5
+                                * uses an explicit li r6,0 -- confirm intent. */
+       tlbwe
+       /* Invalidate TLB1 */
+       li      r9,0x0c
+       tlbivax 0,r9
+#ifdef CONFIG_SMP
+       tlbsync
+#endif
+       msync
+
+       /* Establish the interrupt vector offsets: program each IVORn with
+        * the offset of the matching handler label below interrupt_base */
+       SET_IVOR(0,  CriticalInput);
+       SET_IVOR(1,  MachineCheck);
+       SET_IVOR(2,  DataStorage);
+       SET_IVOR(3,  InstructionStorage);
+       SET_IVOR(4,  ExternalInput);
+       SET_IVOR(5,  Alignment);
+       SET_IVOR(6,  Program);
+       SET_IVOR(7,  FloatingPointUnavailable);
+       SET_IVOR(8,  SystemCall);
+       SET_IVOR(9,  AuxillaryProcessorUnavailable);
+       SET_IVOR(10, Decrementer);
+       SET_IVOR(11, FixedIntervalTimer);
+       SET_IVOR(12, WatchdogTimer);
+       SET_IVOR(13, DataTLBError);
+       SET_IVOR(14, InstructionTLBError);
+       SET_IVOR(15, Debug);
+       SET_IVOR(32, SPEUnavailable);
+       SET_IVOR(33, SPEFloatingPointData);
+       SET_IVOR(34, SPEFloatingPointRound);
+#ifndef CONFIG_E200
+       SET_IVOR(35, PerformanceMonitor);
+#endif
+
+       /* Establish the interrupt vector base */
+       lis     r4,interrupt_base@h     /* IVPR only uses the high 16-bits */
+       mtspr   SPRN_IVPR,r4
+
+       /* Setup the defaults for TLB entries */
+       li      r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
+#ifdef CONFIG_E200
+       oris    r2,r2,MAS4_TLBSELD(1)@h
+#endif
+       mtspr   SPRN_MAS4, r2
+
+#if 0
+       /* Enable DOZE */
+       mfspr   r2,SPRN_HID0
+       oris    r2,r2,HID0_DOZE@h
+       mtspr   SPRN_HID0, r2
+#endif
+#ifdef CONFIG_E200
+       /* enable dedicated debug exception handling resources (Debug APU) */
+       mfspr   r2,SPRN_HID0
+       ori     r2,r2,HID0_DAPUEN@l
+       mtspr   SPRN_HID0,r2
+#endif
+
+#if !defined(CONFIG_BDI_SWITCH)
+       /*
+        * The Abatron BDI JTAG debugger does not tolerate others
+        * mucking with the debug registers.
+        * (So only initialize DBCR0/DBSR when no BDI is configured.)
+        */
+       lis     r2,DBCR0_IDM@h
+       mtspr   SPRN_DBCR0,r2
+       /* clear any residual debug events */
+       li      r2,-1
+       mtspr   SPRN_DBSR,r2
+#endif
+
+       /*
+        * This is where the main kernel code starts.
+        */
+
+       /* ptr to current */
+       lis     r2,init_task@h
+       ori     r2,r2,init_task@l
+
+       /* ptr to current thread */
+       addi    r4,r2,THREAD    /* init task's THREAD */
+       mtspr   SPRN_SPRG3,r4
+
+       /* stack: top of init_thread_union, with a zeroed initial frame */
+       lis     r1,init_thread_union@h
+       ori     r1,r1,init_thread_union@l
+       li      r0,0
+       stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+       bl      early_init
+
+       /* Record how many TLB1/CAM entries this core has */
+       mfspr   r3,SPRN_TLB1CFG
+       andi.   r3,r3,0xfff
+       lis     r4,num_tlbcam_entries@ha
+       stw     r3,num_tlbcam_entries@l(r4)
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ * (r31..r27 were saved from the bootloader at entry.)
+ */
+       mr      r3,r31
+       mr      r4,r30
+       mr      r5,r29
+       mr      r6,r28
+       mr      r7,r27
+       bl      machine_init
+       bl      MMU_init
+
+       /* Setup PTE pointers for the Abatron bdiGDB */
+       lis     r6, swapper_pg_dir@h
+       ori     r6, r6, swapper_pg_dir@l
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       lis     r4, KERNELBASE@h
+       ori     r4, r4, KERNELBASE@l
+       stw     r5, 0(r4)       /* Save abatron_pteptrs at a fixed location */
+       stw     r6, 0(r5)
+
+       /* Let's move on: rfi to start_kernel with MSR_KERNEL */
+       lis     r4,start_kernel@h
+       ori     r4,r4,start_kernel@l
+       lis     r3,MSR_KERNEL@h
+       ori     r3,r3,MSR_KERNEL@l
+       mtspr   SPRN_SRR0,r4
+       mtspr   SPRN_SRR1,r3
+       rfi                     /* change context and jump to start_kernel */
+
+/* Macros to hide the PTE size differences
+ *
+ * FIND_PTE -- walks the page tables given EA & pgdir pointer
+ *   r10 -- EA of fault
+ *   r11 -- PGDIR pointer
+ *   r12 -- free
+ *   label 2: is the bailout case (must exist at the expansion site)
+ *
+ * if we find the pte (fall through):
+ *   r11 is low pte word
+ *   r12 is pointer to the pte
+ */
+#ifdef CONFIG_PTE_64BIT
+/* 64-bit PTEs: flags live in the second (low) word, 4 bytes in */
+#define PTE_FLAGS_OFFSET       4
+#define FIND_PTE       \
+       rlwinm  r12, r10, 13, 19, 29;   /* Compute pgdir/pmd offset */  \
+       lwzx    r11, r12, r11;          /* Get pgd/pmd entry */         \
+       rlwinm. r12, r11, 0, 0, 20;     /* Extract pt base address */   \
+       beq     2f;                     /* Bail if no table */          \
+       rlwimi  r12, r10, 23, 20, 28;   /* Compute pte address */       \
+       lwz     r11, 4(r12);            /* Get pte entry */
+#else
+/* 32-bit PTEs: the single PTE word holds the flags */
+#define PTE_FLAGS_OFFSET       0
+#define FIND_PTE       \
+       rlwimi  r11, r10, 12, 20, 29;   /* Create L1 (pgdir/pmd) address */     \
+       lwz     r11, 0(r11);            /* Get L1 entry */                      \
+       rlwinm. r12, r11, 0, 0, 19;     /* Extract L2 (pte) base address */     \
+       beq     2f;                     /* Bail if no table */                  \
+       rlwimi  r12, r10, 22, 20, 29;   /* Compute PTE address */               \
+       lwz     r11, 0(r12);            /* Get Linux PTE */
+#endif
+
+/*
+ * Interrupt vector entry code
+ *
+ * The Book E MMUs are always on so we don't need to handle
+ * interrupts in real mode as with previous PPC processors. In
+ * this case we handle interrupts in the kernel virtual address
+ * space.
+ *
+ * Interrupt vectors are dynamically placed relative to the
+ * interrupt prefix as determined by the address of interrupt_base.
+ * The interrupt vectors offsets are programmed using the labels
+ * for each interrupt vector entry.
+ *
+ * Interrupt vectors must be aligned on a 16 byte boundary.
+ * We align on a 32 byte cache line boundary for good measure.
+ */
+
+interrupt_base:
+       /* Critical Input Interrupt */
+       CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+
+       /* Machine Check Interrupt */
+#ifdef CONFIG_E200
+       /* no RFMCI, MCSRRs on E200 */
+       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+#else
+       MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+#endif
+
+       /* Data Storage Interrupt.
+        * Fast path: a store fault on a user page whose PTE allows RW
+        * (a dirty/COW transition) is fixed up here by setting
+        * DIRTY/ACCESSED/HWWRITE and rewriting the TLB perms; anything
+        * else bails to the generic data_access path at label 2:.
+        */
+       START_EXCEPTION(DataStorage)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+       mtspr   SPRN_SPRG4W, r12
+       mtspr   SPRN_SPRG5W, r13
+       mfcr    r11
+       mtspr   SPRN_SPRG7W, r11
+
+       /*
+        * Check if it was a store fault, if not then bail
+        * because a user tried to access a kernel or
+        * read-protected page.  Otherwise, get the
+        * offending address and handle it.
+        */
+       mfspr   r10, SPRN_ESR
+       andis.  r10, r10, ESR_ST@h
+       beq     2f
+
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11, TASK_SIZE@h
+       ori     r11, r11, TASK_SIZE@l
+       cmplw   0, r10, r11
+       bge     2f                      /* kernel address: bail to heavy path */
+
+       /* Get the PGD for the current thread */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+4:
+       FIND_PTE
+
+       /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
+       andi.   r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
+       cmpwi   0, r13, _PAGE_RW|_PAGE_USER
+       bne     2f                      /* Bail if not */
+
+       /* Update 'changed'. */
+       ori     r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+       stw     r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
+
+       /* MAS2 not updated as the entry does exist in the tlb, this
+          fault taken to detect state transition (eg: COW -> DIRTY)
+        */
+       andi.   r11, r11, _PAGE_HWEXEC
+       rlwimi  r11, r11, 31, 27, 27    /* SX <- _PAGE_HWEXEC */
+       ori     r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
+
+       /* update search PID in MAS6, AS = 0 */
+       mfspr   r12, SPRN_PID0
+       slwi    r12, r12, 16
+       mtspr   SPRN_MAS6, r12
+
+       /* find the TLB index that caused the fault.  It has to be here. */
+       tlbsx   0, r10
+
+       /* only update the perm bits, assume the RPN is fine */
+       mfspr   r12, SPRN_MAS3
+       rlwimi  r12, r11, 0, 20, 31
+       mtspr   SPRN_MAS3,r12
+       tlbwe
+
+       /* Done...restore registers and get out of here.  */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       rfi                     /* Force context change */
+
+2:
+       /*
+        * The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       data_access
+
+       /* Instruction Storage Interrupt */
+       INSTRUCTION_STORAGE_EXCEPTION
+
+       /* External Input Interrupt */
+       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+
+       /* Alignment Interrupt */
+       ALIGNMENT_EXCEPTION
+
+       /* Program Interrupt */
+       PROGRAM_EXCEPTION
+
+       /* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+       FP_UNAVAILABLE_EXCEPTION
+#else
+#ifdef CONFIG_E200
+       /* E200 treats 'normal' floating point instructions as FP Unavail exception */
+       EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
+#else
+       EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+#endif
+#endif
+
+       /* System Call Interrupt */
+       START_EXCEPTION(SystemCall)
+       NORMAL_EXCEPTION_PROLOG
+       EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+
+       /* Auxillary Processor Unavailable Interrupt */
+       EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+
+       /* Decrementer Interrupt */
+       DECREMENTER_EXCEPTION
+
+       /* Fixed Interval Timer Interrupt */
+       /* TODO: Add FIT support */
+       EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+
+       /* Watchdog Timer Interrupt */
+#ifdef CONFIG_BOOKE_WDT
+       CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+#else
+       CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
+#endif
+
+       /* Data TLB Error Interrupt.
+        * Lightweight TLB reload: walk the page tables for the faulting
+        * data address and, if the PTE is present, mark it ACCESSED and
+        * jump to finish_tlb_load; otherwise bail to data_access.
+        */
+       START_EXCEPTION(DataTLBError)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+       mtspr   SPRN_SPRG4W, r12
+       mtspr   SPRN_SPRG5W, r13
+       mfcr    r11
+       mtspr   SPRN_SPRG7W, r11
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.  (CR5 keeps the user/kernel result for
+        * finish_tlb_load.)
+        */
+       lis     r11, TASK_SIZE@h
+       ori     r11, r11, TASK_SIZE@l
+       cmplw   5, r10, r11
+       blt     5, 3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+
+       mfspr   r12,SPRN_MAS1           /* Set TID to 0 (global kernel entry) */
+       rlwinm  r12,r12,0,16,1
+       mtspr   SPRN_MAS1,r12
+
+       b       4f
+
+       /* Get the PGD for the current thread */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+
+4:
+       FIND_PTE
+       andi.   r13, r11, _PAGE_PRESENT /* Is the page present? */
+       beq     2f                      /* Bail if not present */
+
+#ifdef CONFIG_PTE_64BIT
+       lwz     r13, 0(r12)             /* high PTE word (upper RPN bits) */
+#endif
+       ori     r11, r11, _PAGE_ACCESSED
+       stw     r11, PTE_FLAGS_OFFSET(r12)
+
+        /* Jump to common tlb load */
+       b       finish_tlb_load
+2:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       data_access
+
+       /* Instruction TLB Error Interrupt */
+       /*
+        * Nearly the same as above, except we get our
+        * information from different registers (SRR0 instead of DEAR)
+        * and bailout to a different point (InstructionStorage).
+        */
+       START_EXCEPTION(InstructionTLBError)
+       mtspr   SPRN_SPRG0, r10         /* Save some working registers */
+       mtspr   SPRN_SPRG1, r11
+       mtspr   SPRN_SPRG4W, r12
+       mtspr   SPRN_SPRG5W, r13
+       mfcr    r11
+       mtspr   SPRN_SPRG7W, r11
+       mfspr   r10, SPRN_SRR0          /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.  (CR5 keeps the user/kernel result for
+        * finish_tlb_load.)
+        */
+       lis     r11, TASK_SIZE@h
+       ori     r11, r11, TASK_SIZE@l
+       cmplw   5, r10, r11
+       blt     5, 3f
+       lis     r11, swapper_pg_dir@h
+       ori     r11, r11, swapper_pg_dir@l
+
+       mfspr   r12,SPRN_MAS1           /* Set TID to 0 (global kernel entry) */
+       rlwinm  r12,r12,0,16,1
+       mtspr   SPRN_MAS1,r12
+
+       b       4f
+
+       /* Get the PGD for the current thread */
+3:
+       mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+
+4:
+       FIND_PTE
+       andi.   r13, r11, _PAGE_PRESENT /* Is the page present? */
+       beq     2f                      /* Bail if not present */
+
+#ifdef CONFIG_PTE_64BIT
+       lwz     r13, 0(r12)             /* high PTE word (upper RPN bits) */
+#endif
+       ori     r11, r11, _PAGE_ACCESSED
+       stw     r11, PTE_FLAGS_OFFSET(r12)
+
+       /* Jump to common TLB load point */
+       b       finish_tlb_load
+
+2:
+       /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       b       InstructionStorage
+
+#ifdef CONFIG_SPE
+       /* SPE Unavailable */
+       START_EXCEPTION(SPEUnavailable)
+       NORMAL_EXCEPTION_PROLOG
+       bne     load_up_spe     /* CR set by the prolog -- presumably "came
+                                * from user mode"; kernel use falls through */
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_EE_LITE(0x2010, KernelSPE)
+#else
+       EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
+
+       /* SPE Floating Point Data */
+#ifdef CONFIG_SPE
+       EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+#else
+       EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
+
+       /* SPE Floating Point Round */
+       EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+
+       /* Performance Monitor */
+       EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+
+
+       /* Debug Interrupt */
+       DEBUG_EXCEPTION
+
+/*
+ * Local functions
+ */
+
+       /*
+        * Data TLB exceptions will bail out to this point
+        * if they can't resolve the lightweight TLB fault.
+        * ESR and DEAR are saved into the regs frame and passed as
+        * arg3/arg2 to the C handler.
+        */
+data_access:
+       NORMAL_EXCEPTION_PROLOG
+       mfspr   r5,SPRN_ESR             /* Grab the ESR, save it, pass arg3 */
+       stw     r5,_ESR(r11)
+       mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
+       andis.  r10,r5,(ESR_ILK|ESR_DLK)@h  /* cache-locking fault? */
+       bne     1f
+       EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+1:
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       EXC_XFER_EE_LITE(0x0300, CacheLockingException)
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ *     r10 - EA of fault
+ *     r11 - TLB (info from Linux PTE)
+ *     r12, r13 - available to use
+ *     CR5 - results of addr < TASK_SIZE
+ *     MAS0, MAS1 - loaded with proper value when we get here
+ *     MAS2, MAS3 - will need additional info from Linux PTE
+ *     Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load:
+       /*
+        * We set execute, because we don't have the granularity to
+        * properly set this at the page level (Linux problem).
+        * Many of these bits are software only.  Bits we don't set
+        * here we (properly should) assume have the appropriate value.
+        */
+
+       mfspr   r12, SPRN_MAS2
+#ifdef CONFIG_PTE_64BIT
+       rlwimi  r12, r11, 26, 24, 31    /* extract ...WIMGE from pte */
+#else
+       rlwimi  r12, r11, 26, 27, 31    /* extract WIMGE from pte */
+#endif
+       mtspr   SPRN_MAS2, r12
+
+       bge     5, 1f                   /* CR5: kernel address? */
+
+       /* is user addr: build MAS3 perms from the PTE's user bits,
+        * mirroring them into the supervisor bits; iseleq clears the
+        * perms when _PAGE_USER is absent */
+       andi.   r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+       andi.   r10, r11, _PAGE_USER    /* Test for _PAGE_USER */
+       srwi    r10, r12, 1
+       or      r12, r12, r10   /* Copy user perms into supervisor */
+       iseleq  r12, 0, r12
+       b       2f
+
+       /* is kernel addr */
+1:     rlwinm  r12, r11, 31, 29, 29    /* Extract _PAGE_HWWRITE into SW */
+       ori     r12, r12, (MAS3_SX | MAS3_SR)
+
+#ifdef CONFIG_PTE_64BIT
+2:     rlwimi  r12, r13, 24, 0, 7      /* grab RPN[32:39] */
+       rlwimi  r12, r11, 24, 8, 19     /* grab RPN[40:51] */
+       mtspr   SPRN_MAS3, r12
+BEGIN_FTR_SECTION
+       srwi    r10, r13, 8             /* grab RPN[8:31] */
+       mtspr   SPRN_MAS7, r10
+END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
+#else
+2:     rlwimi  r11, r12, 0, 20, 31     /* Extract RPN from PTE and merge with perms */
+       mtspr   SPRN_MAS3, r11
+#endif
+#ifdef CONFIG_E200
+       /* Round robin TLB1 entries assignment */
+       mfspr   r12, SPRN_MAS0
+
+       /* Extract TLB1CFG(NENTRY) */
+       mfspr   r11, SPRN_TLB1CFG
+       andi.   r11, r11, 0xfff
+
+       /* Extract MAS0(NV) */
+       andi.   r13, r12, 0xfff
+       addi    r13, r13, 1
+       cmpw    0, r13, r11
+       addi    r12, r12, 1
+
+       /* check if we need to wrap */
+       blt     7f
+
+       /* wrap back to first free tlbcam entry */
+       lis     r13, tlbcam_index@ha
+       lwz     r13, tlbcam_index@l(r13)
+       rlwimi  r12, r13, 0, 20, 31
+7:
+       mtspr   SPRN_MAS0,r12
+#endif /* CONFIG_E200 */
+
+       tlbwe
+
+       /* Done...restore registers and get out of here.  */
+       mfspr   r11, SPRN_SPRG7R
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG5R
+       mfspr   r12, SPRN_SPRG4R
+       mfspr   r11, SPRN_SPRG1
+       mfspr   r10, SPRN_SPRG0
+       rfi                                     /* Force context change */
+
+#ifdef CONFIG_SPE
+/* Note that the SPE support is closely modeled after the AltiVec
+ * support.  Changes to one are likely to be applicable to the
+ * other!  */
+load_up_spe:
+/*
+ * Disable SPE for the task which had SPE previously,
+ * and save its SPE registers in its thread_struct.
+ * Enables SPE for use in the kernel on return.
+ * On SMP we know the SPE units are free, since we give it up every
+ * switch.  -- Kumar
+ */
+       mfmsr   r5
+       oris    r5,r5,MSR_SPE@h
+       mtmsr   r5                      /* enable use of SPE now */
+       isync
+/*
+ * For SMP, we don't do lazy SPE switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_spe in switch_to.
+ */
+#ifndef CONFIG_SMP
+       lis     r3,last_task_used_spe@ha
+       lwz     r4,last_task_used_spe@l(r3)
+       cmpi    0,r4,0
+       beq     1f                      /* nobody owned SPE, nothing to save */
+       addi    r4,r4,THREAD    /* want THREAD of last_task_used_spe */
+       SAVE_32EVRS(0,r10,r4)
+       evxor   evr10, evr10, evr10     /* clear out evr10 */
+       evmwumiaa evr10, evr10, evr10   /* evr10 <- ACC = 0 * 0 + ACC */
+       li      r5,THREAD_ACC
+       evstddx evr10, r4, r5           /* save off accumulator */
+       lwz     r5,PT_REGS(r4)
+       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r10,MSR_SPE@h
+       andc    r4,r4,r10       /* disable SPE for previous task */
+       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+       /* enable use of SPE after return (r9 becomes SRR1 below) */
+       oris    r9,r9,MSR_SPE@h
+       mfspr   r5,SPRN_SPRG3           /* current task's THREAD (phys) */
+       li      r4,1
+       li      r10,THREAD_ACC
+       stw     r4,THREAD_USED_SPE(r5)  /* mark thread as an SPE user */
+       evlddx  evr4,r10,r5             /* reload accumulator */
+       evmra   evr4,evr4
+       REST_32EVRS(0,r10,r5)
+#ifndef CONFIG_SMP
+       subi    r4,r5,THREAD
+       stw     r4,last_task_used_spe@l(r3)
+#endif /* CONFIG_SMP */
+       /* restore registers saved by the exception prolog and return */
+2:     REST_4GPRS(3, r11)
+       lwz     r10,_CCR(r11)
+       REST_GPR(1, r11)
+       mtcr    r10
+       lwz     r10,_LINK(r11)
+       mtlr    r10
+       REST_GPR(10, r11)
+       mtspr   SPRN_SRR1,r9
+       mtspr   SPRN_SRR0,r12
+       REST_GPR(9, r11)
+       REST_GPR(12, r11)
+       lwz     r11,GPR11(r11)
+       SYNC
+       rfi
+
+/*
+ * SPE unavailable trap from kernel - print a message, but let
+ * the task use SPE in the kernel until it returns to user mode.
+ */
+KernelSPE:
+       lwz     r3,_MSR(r1)
+       oris    r3,r3,MSR_SPE@h
+       stw     r3,_MSR(r1)     /* enable use of SPE after return */
+       lis     r3,87f@h        /* printk format string below */
+       ori     r3,r3,87f@l
+       mr      r4,r2           /* current */
+       lwz     r5,_NIP(r1)     /* faulting PC */
+       bl      printk
+       b       ret_from_except
+87:    .string "SPE used in kernel  (task=%p, pc=%x)  \n"
+       .align  4,0
+
+#endif /* CONFIG_SPE */
+
+/*
+ * Global functions
+ */
+
+/*
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU:
+ * the MAS0..MAS3 words are read from the 20-byte TLBCAM[index]
+ * record and written to the MAS registers, then committed with tlbwe.
+ */
+_GLOBAL(loadcam_entry)
+       lis     r4,TLBCAM@ha
+       addi    r4,r4,TLBCAM@l
+       mulli   r5,r3,20                /* each TLBCAM record is 20 bytes */
+       add     r3,r5,r4                /* r3 = &TLBCAM[index] */
+       lwz     r4,0(r3)
+       mtspr   SPRN_MAS0,r4
+       lwz     r4,4(r3)
+       mtspr   SPRN_MAS1,r4
+       lwz     r4,8(r3)
+       mtspr   SPRN_MAS2,r4
+       lwz     r4,12(r3)
+       mtspr   SPRN_MAS3,r4
+       tlbwe
+       isync
+       blr
+
+/*
+ * extern void giveup_altivec(struct task_struct *prev)
+ *
+ * The e500 core does not have an AltiVec unit, so this is a no-op
+ * stub kept only to satisfy callers in common code.
+ */
+_GLOBAL(giveup_altivec)
+       blr
+
+#ifdef CONFIG_SPE
+/*
+ * extern void giveup_spe(struct task_struct *prev)
+ *
+ * Save prev's SPE state into its thread_struct, clear MSR_SPE in
+ * prev's saved MSR (if it has a regs frame), and forget it as the
+ * lazy SPE owner.  A NULL prev just enables SPE and returns.
+ */
+_GLOBAL(giveup_spe)
+       mfmsr   r5
+       oris    r5,r5,MSR_SPE@h
+       SYNC
+       mtmsr   r5                      /* enable use of SPE now */
+       isync
+       cmpi    0,r3,0
+       beqlr-                          /* if no previous owner, done */
+       addi    r3,r3,THREAD            /* want THREAD of task */
+       lwz     r5,PT_REGS(r3)
+       cmpi    0,r5,0                  /* remember whether regs exist (used at beq below) */
+       SAVE_32EVRS(0, r4, r3)
+       evxor   evr6, evr6, evr6        /* clear out evr6 */
+       evmwumiaa evr6, evr6, evr6      /* evr6 <- ACC = 0 * 0 + ACC */
+       li      r4,THREAD_ACC
+       evstddx evr6, r4, r3            /* save off accumulator */
+       mfspr   r6,SPRN_SPEFSCR
+       stw     r6,THREAD_SPEFSCR(r3)   /* save spefscr register value */
+       beq     1f                      /* no regs frame: skip MSR update */
+       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r3,MSR_SPE@h
+       andc    r4,r4,r3                /* disable SPE for previous task */
+       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+       li      r5,0
+       lis     r4,last_task_used_spe@ha
+       stw     r5,last_task_used_spe@l(r4)
+#endif /* CONFIG_SMP */
+       blr
+#endif /* CONFIG_SPE */
+
+/*
+ * extern void giveup_fpu(struct task_struct *prev)
+ *
+ * Not all FSL Book-E cores have an FPU; provide a no-op stub when
+ * there is none (the real implementation lives elsewhere otherwise).
+ */
+#ifndef CONFIG_PPC_FPU
+_GLOBAL(giveup_fpu)
+       blr
+#endif
+
+/*
+ * extern void abort(void)
+ *
+ * At present, this routine just applies a system reset: enable debug
+ * events in the MSR, then request a chip reset via DBCR0[RST].
+ * NOTE(review): there is no blr/loop after the final mtspr -- this
+ * relies on the reset taking effect immediately, otherwise execution
+ * would fall through into set_context below.  Confirm intent.
+ */
+_GLOBAL(abort)
+       li      r13,0
+        mtspr   SPRN_DBCR0,r13         /* disable all debug events */
+       mfmsr   r13
+       ori     r13,r13,MSR_DE@l        /* Enable Debug Events */
+       mtmsr   r13
+        mfspr   r13,SPRN_DBCR0
+        lis    r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
+        mtspr   SPRN_DBCR0,r13
+
+
+/* Switch the MMU context: r3 = new context (PID), r4 = new pgdir.
+ * On Book-E this is just a PID update; the pgdir is only recorded
+ * for the BDI2000 debugger.
+ */
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Context switch the PTE pointer for the Abatron BDI2000.
+        * The PGDIR is the second parameter.
+        */
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       stw     r4, 0x4(r5)
+#endif
+       mtspr   SPRN_PID,r3
+       isync                   /* Force context change */
+       blr
+
+/*
+ * We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+       .data
+       .align  12              /* 2^12 = page aligned */
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  4096
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  4096            /* kernel's root page directory */
+
+/* Reserved 4k for the critical exception stack & 4k for the machine
+ * check stack per CPU for kernel mode exceptions */
+       .section .bss
+        .align 12
+exception_stack_bottom:
+       .space  BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
+       .globl  exception_stack_top
+exception_stack_top:
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+       .globl  cmd_line
+cmd_line:
+       .space  512
+
+/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+       .space  8
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
new file mode 100644 (file)
index 0000000..444fdcc
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ *  This file contains the power_save function for 6xx & 7xxx CPUs
+ *  rewritten in assembler
+ *
+ *  Warning ! This code assumes that if your machine has a 750fx
+ *  it will have PLL 1 set to low speed mode (used during NAP/DOZE).
+ *  if this is not the case some additional changes will have to
+ *  be done to check a runtime var (a bit like powersave-nap)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#undef DEBUG
+
+       .text
+
+/*
+ * Init idle, called at early CPU setup time from head.S for each CPU
+ * Make sure no rest of NAP mode remains in HID0, save default
+ * values for some CPU specific registers. Called with r24
+ * containing CPU number and r3 reloc offset
+ */
+_GLOBAL(init_idle_6xx)
+BEGIN_FTR_SECTION
+       mfspr   r4,SPRN_HID0
+       rlwinm  r4,r4,0,10,8    /* Clear NAP */
+       mtspr   SPRN_HID0, r4
+       b       1f
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+       blr
+1:
+       /* r5 = per-CPU word offset (cpu * 4) plus reloc offset */
+       slwi    r5,r24,2
+       add     r5,r5,r3
+BEGIN_FTR_SECTION
+       /* Save boot-time MSSCR0 so it can be restored after NAP */
+       mfspr   r4,SPRN_MSSCR0
+       addis   r6,r5, nap_save_msscr0@ha
+       stw     r4,nap_save_msscr0@l(r6)
+END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
+BEGIN_FTR_SECTION
+       /* Save boot-time HID1 (PLL selection) on 750FX */
+       mfspr   r4,SPRN_HID1
+       addis   r6,r5,nap_save_hid1@ha
+       stw     r4,nap_save_hid1@l(r6)
+END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
+       blr
+
+/*
+ * Here is the power_save_6xx function. This could eventually be
+ * split into several functions & changing the function pointer
+ * depending on the various features.
+ */
+_GLOBAL(ppc6xx_idle)
+       /* Check if we can nap or doze, put HID0 mask in r3
+        */
+       lis     r3, 0
+BEGIN_FTR_SECTION
+       lis     r3,HID0_DOZE@h
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+BEGIN_FTR_SECTION
+       /* We must dynamically check for the NAP feature as it
+        * can be cleared by CPU init after the fixups are done
+        */
+       lis     r4,cur_cpu_spec@ha
+       lwz     r4,cur_cpu_spec@l(r4)
+       lwz     r4,CPU_SPEC_FEATURES(r4)
+       andi.   r0,r4,CPU_FTR_CAN_NAP
+       beq     1f
+       /* Now check if user or arch enabled NAP mode */
+       lis     r4,powersave_nap@ha
+       lwz     r4,powersave_nap@l(r4)
+       cmpwi   0,r4,0
+       beq     1f
+       lis     r3,HID0_NAP@h
+1:     
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+       cmpwi   0,r3,0
+       beqlr                   /* nothing to do: neither DOZE nor NAP */
+
+       /* Clear MSR:EE */
+       mfmsr   r7
+       rlwinm  r0,r7,0,17,15
+       mtmsr   r0
+
+       /* Check current_thread_info()->flags */
+       rlwinm  r4,r1,0,0,18
+       lwz     r4,TI_FLAGS(r4)
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq     1f
+       /* Work pending: re-enable interrupts and bail out */
+       mtmsr   r7      /* out of line this ? */
+       blr
+1:     
+       /* Some pre-nap cleanups needed on some CPUs */
+       andis.  r0,r3,HID0_NAP@h
+       beq     2f
+BEGIN_FTR_SECTION
+       /* Disable L2 prefetch on some 745x and try to ensure
+        * L2 prefetch engines are idle. As explained by errata
+        * text, we can't be sure they are, we just hope very hard
+        * that well be enough (sic !). At least I noticed Apple
+        * doesn't even bother doing the dcbf's here...
+        */
+       mfspr   r4,SPRN_MSSCR0
+       rlwinm  r4,r4,0,0,29
+       sync
+       mtspr   SPRN_MSSCR0,r4
+       sync
+       isync
+       lis     r4,KERNELBASE@h
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
+#ifdef DEBUG
+       lis     r6,nap_enter_count@ha
+       lwz     r4,nap_enter_count@l(r6)
+       addi    r4,r4,1
+       stw     r4,nap_enter_count@l(r6)
+#endif 
+2:
+BEGIN_FTR_SECTION
+       /* Go to low speed mode on some 750FX */
+       lis     r4,powersave_lowspeed@ha
+       lwz     r4,powersave_lowspeed@l(r4)
+       cmpwi   0,r4,0
+       beq     1f
+       mfspr   r4,SPRN_HID1
+       oris    r4,r4,0x0001    /* select PLL1 (low speed) */
+       mtspr   SPRN_HID1,r4
+1:     
+END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
+
+       /* Go to NAP or DOZE now */     
+       mfspr   r4,SPRN_HID0
+       lis     r5,(HID0_NAP|HID0_SLEEP)@h
+BEGIN_FTR_SECTION
+       oris    r5,r5,HID0_DOZE@h
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+       andc    r4,r4,r5        /* clear any stale power-save bits... */
+       or      r4,r4,r3        /* ...then set the one chosen above */
+BEGIN_FTR_SECTION
+       oris    r4,r4,HID0_DPM@h        /* that should be done once for all  */
+END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
+       mtspr   SPRN_HID0,r4
+BEGIN_FTR_SECTION
+       DSSALL
+       sync
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+       ori     r7,r7,MSR_EE /* Could be omitted (already set) */
+       oris    r7,r7,MSR_POW@h
+       sync
+       isync
+       mtmsr   r7              /* MSR_POW set: enter power-save state */
+       isync
+       sync
+       blr
+       
+/*
+ * Return from NAP/DOZE mode, restore some CPU specific registers,
+ * we are called with DR/IR still off and r2 containing physical
+ * address of current.
+ */
+_GLOBAL(power_save_6xx_restore)
+       mfspr   r11,SPRN_HID0
+       rlwinm. r11,r11,0,10,8  /* Clear NAP & copy NAP bit !state to cr1 EQ */
+       cror    4*cr1+eq,4*cr0+eq,4*cr0+eq
+BEGIN_FTR_SECTION
+       rlwinm  r11,r11,0,9,7   /* Clear DOZE */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+       mtspr   SPRN_HID0, r11
+
+#ifdef DEBUG
+       beq     cr1,1f
+       lis     r11,(nap_return_count-KERNELBASE)@ha
+       lwz     r9,nap_return_count@l(r11)
+       addi    r9,r9,1
+       stw     r9,nap_return_count@l(r11)
+1:
+#endif
+       
+       /* r11 = this CPU's word offset into the nap_save_* arrays */
+       rlwinm  r9,r1,0,0,18
+       tophys(r9,r9)
+       lwz     r11,TI_CPU(r9)
+       slwi    r11,r11,2
+       /* Todo make sure all these are in the same page
+        * and load r22 (@ha part + CPU offset) only once
+        */
+BEGIN_FTR_SECTION
+       beq     cr1,1f
+       /* Restore MSSCR0 (re-enables L2 prefetch disabled before NAP) */
+       addis   r9,r11,(nap_save_msscr0-KERNELBASE)@ha
+       lwz     r9,nap_save_msscr0@l(r9)
+       mtspr   SPRN_MSSCR0, r9
+       sync
+       isync
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
+BEGIN_FTR_SECTION
+       /* Restore HID1 (PLL selection) on 750FX */
+       addis   r9,r11,(nap_save_hid1-KERNELBASE)@ha
+       lwz     r9,nap_save_hid1@l(r9)
+       mtspr   SPRN_HID1, r9
+END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
+       b       transfer_to_handler_cont
+
+       .data
+
+/* Per-CPU saved MSSCR0 values (one word per CPU), written by init_idle_6xx */
+_GLOBAL(nap_save_msscr0)
+       .space  4*NR_CPUS
+
+/* Per-CPU saved HID1 values (750FX PLL state), written by init_idle_6xx */
+_GLOBAL(nap_save_hid1)
+       .space  4*NR_CPUS
+
+/* Non-zero when the user/platform has enabled NAP mode */
+_GLOBAL(powersave_nap)
+       .long   0
+/* Non-zero when 750FX low-speed (PLL1) mode is requested while napping */
+_GLOBAL(powersave_lowspeed)
+       .long   0
+
+#ifdef DEBUG
+_GLOBAL(nap_enter_count)
+       .space  4
+_GLOBAL(nap_return_count)
+       .space  4
+#endif
similarity index 95%
rename from arch/ppc64/kernel/idle_power4.S
rename to arch/powerpc/kernel/idle_power4.S
index ca02afe..1494e2f 100644 (file)
@@ -39,13 +39,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
         * can be cleared by CPU init after the fixups are done
         */
        LOADBASE(r3,cur_cpu_spec)
-       ld      r4,cur_cpu_spec@l(r3)
+       ld      r4,OFF(cur_cpu_spec)(r3)
        ld      r4,CPU_SPEC_FEATURES(r4)
        andi.   r0,r4,CPU_FTR_CAN_NAP
        beqlr
        /* Now check if user or arch enabled NAP mode */
        LOADBASE(r3,powersave_nap)
-       lwz     r4,powersave_nap@l(r3)
+       lwz     r4,OFF(powersave_nap)(r3)
        cmpwi   0,r4,0
        beqlr
 
@@ -63,8 +63,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
        beq     1f
        mtmsrd  r7      /* out of line this ? */
        blr
-1:     
-       /* Go to NAP now */     
+1:
+       /* Go to NAP now */
 BEGIN_FTR_SECTION
        DSSALL
        sync
@@ -76,4 +76,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        isync
        sync
        blr
-       
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
new file mode 100644 (file)
index 0000000..303229b
--- /dev/null
@@ -0,0 +1,1064 @@
+/*
+ * This file contains miscellaneous low-level functions.
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+       .text
+
+       .align  5
+/*
+ * void __delay(unsigned long loops)
+ * Busy-wait for r3 iterations of an empty bdnz loop; returns
+ * immediately when loops == 0.
+ */
+_GLOBAL(__delay)
+       cmpwi   0,r3,0
+       mtctr   r3
+       beqlr
+1:     bdnz    1b
+       blr
+
+/*
+ * This returns the high 64 bits of the product of two 64-bit numbers.
+ * Inputs: A in r3(hi)/r4(lo), B in r5(hi)/r6(lo); result in r3/r4.
+ */
+_GLOBAL(mulhdu)
+       cmpwi   r6,0            /* is low word of B zero? */
+       cmpwi   cr1,r3,0        /* is high word of A zero? */
+       mr      r10,r4
+       mulhwu  r4,r4,r5
+       beq     1f
+       mulhwu  r0,r10,r6
+       mullw   r7,r10,r5
+       addc    r7,r0,r7
+       addze   r4,r4
+1:     beqlr   cr1             /* all done if high part of A is 0 */
+       mr      r10,r3
+       mullw   r9,r3,r5
+       mulhwu  r3,r3,r5
+       beq     2f
+       mullw   r0,r10,r6
+       mulhwu  r8,r10,r6
+       addc    r7,r0,r7
+       adde    r4,r4,r8
+       addze   r3,r3
+2:     addc    r4,r4,r9
+       addze   r3,r3
+       blr
+
+/*
+ * Returns (address we're running at) - (address we were linked at)
+ * for use before the text and data are mapped to KERNELBASE.
+ * Uses bl/mflr to discover the actual run-time PC of label 1.
+ */
+_GLOBAL(reloc_offset)
+       mflr    r0
+       bl      1f
+1:     mflr    r3              /* r3 = run-time address of 1b */
+       LOADADDR(r4,1b)         /* r4 = link-time address of 1b */
+       subf    r3,r4,r3
+       mtlr    r0
+       blr
+
+/*
+ * add_reloc_offset(x) returns x + reloc_offset().
+ * r3 = x on entry and on return.
+ */
+_GLOBAL(add_reloc_offset)
+       mflr    r0
+       bl      1f
+1:     mflr    r5              /* run-time address of 1b */
+       LOADADDR(r4,1b)         /* link-time address of 1b */
+       subf    r5,r4,r5
+       add     r3,r3,r5
+       mtlr    r0
+       blr
+
+/*
+ * sub_reloc_offset(x) returns x - reloc_offset().
+ * r3 = x on entry and on return.
+ */
+_GLOBAL(sub_reloc_offset)
+       mflr    r0
+       bl      1f
+1:     mflr    r5              /* run-time address of 1b */
+       lis     r4,1b@ha        /* link-time address of 1b */
+       addi    r4,r4,1b@l
+       subf    r5,r4,r5
+       subf    r3,r5,r3
+       mtlr    r0
+       blr
+
+/*
+ * reloc_got2 runs through the .got2 section adding an offset
+ * to each entry.  r3 = offset to add; clobbers r0, r4, r7, r8, ctr.
+ */
+_GLOBAL(reloc_got2)
+       mflr    r11
+       lis     r7,__got2_start@ha
+       addi    r7,r7,__got2_start@l
+       lis     r8,__got2_end@ha
+       addi    r8,r8,__got2_end@l
+       subf    r8,r7,r8
+       srwi.   r8,r8,2         /* entry count (words); done if empty */
+       beqlr
+       mtctr   r8
+       bl      1f
+1:     mflr    r0
+       lis     r4,1b@ha
+       addi    r4,r4,1b@l
+       subf    r0,r4,r0        /* r0 = current reloc offset */
+       add     r7,r0,r7        /* relocate the table pointer itself */
+2:     lwz     r0,0(r7)
+       add     r0,r0,r3
+       stw     r0,0(r7)
+       addi    r7,r7,4
+       bdnz    2b
+       mtlr    r11
+       blr
+
+/*
+ * identify_cpu,
+ * called with r3 = data offset and r4 = CPU number
+ * doesn't change r3
+ * Scans cpu_specs[] for an entry whose (PVR & mask) == value and
+ * stores its link-time address into cur_cpu_spec.
+ * NOTE(review): the scan has no terminator check — presumably the last
+ * cpu_specs entry is a catch-all (mask 0) that always matches; confirm.
+ */
+_GLOBAL(identify_cpu)
+       addis   r8,r3,cpu_specs@ha
+       addi    r8,r8,cpu_specs@l
+       mfpvr   r7
+1:
+       lwz     r5,CPU_SPEC_PVR_MASK(r8)
+       and     r5,r5,r7
+       lwz     r6,CPU_SPEC_PVR_VALUE(r8)
+       cmplw   0,r6,r5
+       beq     1f
+       addi    r8,r8,CPU_SPEC_ENTRY_SIZE
+       b       1b
+1:
+       addis   r6,r3,cur_cpu_spec@ha
+       addi    r6,r6,cur_cpu_spec@l
+       sub     r8,r8,r3        /* store link-time (unrelocated) address */
+       stw     r8,0(r6)
+       blr
+
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nop's over sections of code that don't apply for this cpu.
+ * r3 = data offset (not changed)
+ * Each fixup table entry is 16 bytes: mask, value, section begin, section end.
+ */
+_GLOBAL(do_cpu_ftr_fixups)
+       /* Get CPU 0 features */
+       addis   r6,r3,cur_cpu_spec@ha
+       addi    r6,r6,cur_cpu_spec@l
+       lwz     r4,0(r6)
+       add     r4,r4,r3
+       lwz     r4,CPU_SPEC_FEATURES(r4)
+
+       /* Get the fixup table */
+       addis   r6,r3,__start___ftr_fixup@ha
+       addi    r6,r6,__start___ftr_fixup@l
+       addis   r7,r3,__stop___ftr_fixup@ha
+       addi    r7,r7,__stop___ftr_fixup@l
+
+       /* Do the fixup */
+1:     cmplw   0,r6,r7
+       bgelr
+       addi    r6,r6,16
+       lwz     r8,-16(r6)      /* mask */
+       and     r8,r8,r4
+       lwz     r9,-12(r6)      /* value */
+       cmplw   0,r8,r9
+       beq     1b              /* features match: leave code as-is */
+       lwz     r8,-8(r6)       /* section begin */
+       lwz     r9,-4(r6)       /* section end */
+       subf.   r9,r8,r9
+       beq     1b
+       /* write nops over the section of code */
+       /* todo: if large section, add a branch at the start of it */
+       srwi    r9,r9,2
+       mtctr   r9
+       add     r8,r8,r3
+       lis     r0,0x60000000@h /* nop */
+3:     stw     r0,0(r8)
+       andi.   r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+       beq     2f
+       /* split I/D cache: flush patched line to memory, invalidate I-line */
+       dcbst   0,r8            /* suboptimal, but simpler */
+       sync
+       icbi    0,r8
+2:     addi    r8,r8,4
+       bdnz    3b
+       sync                    /* additional sync needed on g4 */
+       isync
+       b       1b
+
+/*
+ * call_setup_cpu - call the setup_cpu function for this cpu
+ * r3 = data offset, r24 = cpu number
+ *
+ * Setup function is called with:
+ *   r3 = data offset
+ *   r4 = ptr to CPU spec (relocated)
+ * Tail-calls via bctr; returns directly if no setup function is set.
+ */
+_GLOBAL(call_setup_cpu)
+       addis   r4,r3,cur_cpu_spec@ha
+       addi    r4,r4,cur_cpu_spec@l
+       lwz     r4,0(r4)
+       add     r4,r4,r3        /* relocate the spec pointer */
+       lwz     r5,CPU_SPEC_SETUP(r4)
+       cmpi    0,r5,0          /* test before relocating: NULL means none */
+       add     r5,r5,r3
+       beqlr
+       mtctr   r5
+       bctr
+
+#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
+
+/* This gets called by via-pmu.c to switch the PLL selection
+ * on 750fx CPU. This function should really be moved to some
+ * other place (as most of the cpufreq code in via-pmu
+ * r3 = 0 selects PLL0, non-zero selects PLL1 (low speed).
+ */
+_GLOBAL(low_choose_750fx_pll)
+       /* Clear MSR:EE */
+       mfmsr   r7
+       rlwinm  r0,r7,0,17,15
+       mtmsr   r0
+
+       /* If switching to PLL1, disable HID0:BTIC */
+       cmplwi  cr0,r3,0
+       beq     1f
+       mfspr   r5,SPRN_HID0
+       rlwinm  r5,r5,0,27,25
+       sync
+       mtspr   SPRN_HID0,r5
+       isync
+       sync
+
+1:
+       /* Calc new HID1 value */
+       /* NOTE(review): the two comments below look swapped relative to the
+        * instructions they annotate (mfspr reads HID1, rlwinm r5 builds the
+        * PS bit from the parameter) — kept as-is; confirm before changing. */
+       mfspr   r4,SPRN_HID1    /* Build a HID1:PS bit from parameter */
+       rlwinm  r5,r3,16,15,15  /* Clear out HID1:PS from value read */
+       rlwinm  r4,r4,0,16,14   /* Could have I used rlwimi here ? */
+       or      r4,r4,r5
+       mtspr   SPRN_HID1,r4
+
+       /* Store new HID1 image */
+       rlwinm  r6,r1,0,0,18
+       lwz     r6,TI_CPU(r6)
+       slwi    r6,r6,2
+       addis   r6,r6,nap_save_hid1@ha
+       stw     r4,nap_save_hid1@l(r6)
+
+       /* If switching to PLL0, enable HID0:BTIC */
+       cmplwi  cr0,r3,0
+       bne     1f
+       mfspr   r5,SPRN_HID0
+       ori     r5,r5,HID0_BTIC
+       sync
+       mtspr   SPRN_HID0,r5
+       isync
+       sync
+
+1:
+       /* Return */
+       mtmsr   r7
+       blr
+
+/*
+ * Switch the 7447A dynamic frequency scaling (DFS) divider.
+ * r3 = 0 or 1, inserted into HID1 bit 9.  Interrupts are masked
+ * around the HID1 update.
+ */
+_GLOBAL(low_choose_7447a_dfs)
+       /* Clear MSR:EE */
+       mfmsr   r7
+       rlwinm  r0,r7,0,17,15
+       mtmsr   r0
+       
+       /* Calc new HID1 value */
+       mfspr   r4,SPRN_HID1
+       insrwi  r4,r3,1,9       /* insert parameter into bit 9 */
+       sync
+       mtspr   SPRN_HID1,r4
+       sync
+       isync
+
+       /* Return */
+       mtmsr   r7
+       blr
+
+#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
+
+/*
+ * complement mask on the msr then "or" some values on.
+ *     _nmask_and_or_msr(nmask, value_to_or)
+ * r3 = bits to clear, r4 = bits to set; new MSR = (MSR & ~r3) | r4.
+ */
+_GLOBAL(_nmask_and_or_msr)
+       mfmsr   r0              /* Get current msr */
+       andc    r0,r0,r3        /* And off the bits set in r3 (first parm) */
+       or      r0,r0,r4        /* Or on the bits in r4 (second parm) */
+       SYNC                    /* Some chip revs have problems here... */
+       mtmsr   r0              /* Update machine state */
+       isync
+       blr                     /* Done */
+
+
+/*
+ * Flush MMU TLB
+ * Invalidates the whole TLB; implementation varies per core family.
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_40x)
+       sync                    /* Flush to memory before changing mapping */
+       tlbia
+       isync                   /* Flush shadow TLB */
+#elif defined(CONFIG_44x)
+       li      r3,0
+       sync
+
+       /* Load high watermark */
+       lis     r4,tlb_44x_hwater@ha
+       lwz     r5,tlb_44x_hwater@l(r4)
+
+       /* Write an invalid PAGEID into every TLB entry up to the watermark */
+1:     tlbwe   r3,r3,PPC44x_TLB_PAGEID
+       addi    r3,r3,1
+       cmpw    0,r3,r5
+       ble     1b
+
+       isync
+#elif defined(CONFIG_FSL_BOOKE)
+       /* Invalidate all entries in TLB0 */
+       li      r3, 0x04
+       tlbivax 0,3
+       /* Invalidate all entries in TLB1 */
+       li      r3, 0x0c
+       tlbivax 0,3
+       /* Invalidate all entries in TLB2 */
+       li      r3, 0x14
+       tlbivax 0,3
+       /* Invalidate all entries in TLB3 */
+       li      r3, 0x1c
+       tlbivax 0,3
+       msync
+#ifdef CONFIG_SMP
+       tlbsync
+#endif /* CONFIG_SMP */
+#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
+#if defined(CONFIG_SMP)
+       /* Hash-MMU SMP: take mmu_hash_lock (with EE and DR off) around tlbia */
+       rlwinm  r8,r1,0,0,18
+       lwz     r8,TI_CPU(r8)
+       oris    r8,r8,10        /* lock token: CPU number | 0x000a0000 */
+       mfmsr   r10
+       SYNC
+       rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
+       rlwinm  r0,r0,0,28,26           /* clear DR */
+       mtmsr   r0
+       SYNC_601
+       isync
+       lis     r9,mmu_hash_lock@h
+       ori     r9,r9,mmu_hash_lock@l
+       tophys(r9,r9)
+10:    lwarx   r7,0,r9
+       cmpwi   0,r7,0
+       bne-    10b
+       stwcx.  r8,0,r9
+       bne-    10b
+       sync
+       tlbia
+       sync
+       TLBSYNC
+       li      r0,0
+       stw     r0,0(r9)                /* clear mmu_hash_lock */
+       mtmsr   r10
+       SYNC_601
+       isync
+#else /* CONFIG_SMP */
+       sync
+       tlbia
+       sync
+#endif /* CONFIG_SMP */
+#endif /* ! defined(CONFIG_40x) */
+       blr
+
+/*
+ * Flush MMU TLB for a particular address
+ * r3 = effective address to invalidate.
+ */
+_GLOBAL(_tlbie)
+#if defined(CONFIG_40x)
+       tlbsx.  r3, 0, r3
+       bne     10f             /* no matching entry: nothing to do */
+       sync
+       /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
+        * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
+        * the TLB entry. */
+       tlbwe   r3, r3, TLB_TAG
+       isync
+10:
+#elif defined(CONFIG_44x)
+       mfspr   r4,SPRN_MMUCR
+       mfspr   r5,SPRN_PID                     /* Get PID */
+       rlwimi  r4,r5,0,24,31                   /* Set TID */
+       mtspr   SPRN_MMUCR,r4
+
+       tlbsx.  r3, 0, r3
+       bne     10f             /* no matching entry: nothing to do */
+       sync
+       /* There are only 64 TLB entries, so r3 < 64,
+        * which means bit 22, is clear.  Since 22 is
+        * the V bit in the TLB_PAGEID, loading this
+        * value will invalidate the TLB entry.
+        */
+       tlbwe   r3, r3, PPC44x_TLB_PAGEID
+       isync
+10:
+#elif defined(CONFIG_FSL_BOOKE)
+       rlwinm  r4, r3, 0, 0, 19        /* page-align the address */
+       ori     r5, r4, 0x08    /* TLBSEL = 1 */
+       ori     r6, r4, 0x10    /* TLBSEL = 2 */
+       ori     r7, r4, 0x18    /* TLBSEL = 3 */
+       tlbivax 0, r4
+       tlbivax 0, r5
+       tlbivax 0, r6
+       tlbivax 0, r7
+       msync
+#if defined(CONFIG_SMP)
+       tlbsync
+#endif /* CONFIG_SMP */
+#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
+#if defined(CONFIG_SMP)
+       /* Hash-MMU SMP: take mmu_hash_lock (with EE and DR off) around tlbie */
+       rlwinm  r8,r1,0,0,18
+       lwz     r8,TI_CPU(r8)
+       oris    r8,r8,11        /* lock token: CPU number | 0x000b0000 */
+       mfmsr   r10
+       SYNC
+       rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
+       rlwinm  r0,r0,0,28,26           /* clear DR */
+       mtmsr   r0
+       SYNC_601
+       isync
+       lis     r9,mmu_hash_lock@h
+       ori     r9,r9,mmu_hash_lock@l
+       tophys(r9,r9)
+10:    lwarx   r7,0,r9
+       cmpwi   0,r7,0
+       bne-    10b
+       stwcx.  r8,0,r9
+       bne-    10b
+       eieio
+       tlbie   r3
+       sync
+       TLBSYNC
+       li      r0,0
+       stw     r0,0(r9)                /* clear mmu_hash_lock */
+       mtmsr   r10
+       SYNC_601
+       isync
+#else /* CONFIG_SMP */
+       tlbie   r3
+       sync
+#endif /* CONFIG_SMP */
+#endif /* ! CONFIG_40x */
+       blr
+
+/*
+ * Flush instruction cache.
+ * This is a no-op on the 601.
+ * Core-specific: 8xx uses IC_CST, 4xx uses iccci, e500 uses L1CSR
+ * (L1CSR0 when the feature fixups report a unified cache, L1CSR1
+ * otherwise), classic 6xx uses HID0:ICFI.
+ */
+_GLOBAL(flush_instruction_cache)
+#if defined(CONFIG_8xx)
+       isync
+       lis     r5, IDC_INVALL@h
+       mtspr   SPRN_IC_CST, r5
+#elif defined(CONFIG_4xx)
+#ifdef CONFIG_403GCX
+       li      r3, 512
+       mtctr   r3
+       lis     r4, KERNELBASE@h
+1:     iccci   0, r4
+       addi    r4, r4, 16
+       bdnz    1b
+#else
+       lis     r3, KERNELBASE@h
+       iccci   0,r3
+#endif
+/* was "#elif CONFIG_FSL_BOOKE": use defined() like every other guard,
+ * so this still preprocesses correctly under -Wundef / strict cpp */
+#elif defined(CONFIG_FSL_BOOKE)
+BEGIN_FTR_SECTION
+       mfspr   r3,SPRN_L1CSR0
+       ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
+       /* msync; isync recommended here */
+       mtspr   SPRN_L1CSR0,r3
+       isync
+       blr
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+       mfspr   r3,SPRN_L1CSR1
+       ori     r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
+       mtspr   SPRN_L1CSR1,r3
+#else
+       mfspr   r3,SPRN_PVR
+       rlwinm  r3,r3,16,16,31  /* extract processor version */
+       cmpwi   0,r3,1
+       beqlr                   /* for 601, do nothing */
+       /* 603/604 processor - use invalidate-all bit in HID0 */
+       mfspr   r3,SPRN_HID0
+       ori     r3,r3,HID0_ICFI
+       mtspr   SPRN_HID0,r3
+#endif /* CONFIG_8xx/4xx */
+       isync
+       blr
+
+/*
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ * This is a no-op on the 601.
+ *
+ * flush_icache_range(unsigned long start, unsigned long stop)
+ * r3 = start, r4 = stop (exclusive); range is widened to cache lines.
+ */
+_GLOBAL(flush_icache_range)
+BEGIN_FTR_SECTION
+       blr                             /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+       li      r5,L1_CACHE_BYTES-1
+       andc    r3,r3,r5        /* round start down to a line boundary */
+       subf    r4,r3,r4
+       add     r4,r4,r5
+       srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines, rounded up */
+       beqlr
+       mtctr   r4
+       mr      r6,r3
+1:     dcbst   0,r3
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    1b
+       sync                            /* wait for dcbst's to get to ram */
+       mtctr   r4
+2:     icbi    0,r6
+       addi    r6,r6,L1_CACHE_BYTES
+       bdnz    2b
+       sync                            /* additional sync needed on g4 */
+       isync
+       blr
+/*
+ * Write any modified data cache blocks out to memory.
+ * Does not invalidate the corresponding cache lines (especially for
+ * any corresponding instruction cache).
+ *
+ * clean_dcache_range(unsigned long start, unsigned long stop)
+ * r3 = start, r4 = stop (exclusive); range is widened to cache lines.
+ */
+_GLOBAL(clean_dcache_range)
+       li      r5,L1_CACHE_BYTES-1
+       andc    r3,r3,r5        /* round start down to a line boundary */
+       subf    r4,r3,r4
+       add     r4,r4,r5
+       srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines, rounded up */
+       beqlr
+       mtctr   r4
+
+1:     dcbst   0,r3
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    1b
+       sync                            /* wait for dcbst's to get to ram */
+       blr
+
+/*
+ * Write any modified data cache blocks out to memory and invalidate them.
+ * Does not invalidate the corresponding instruction cache blocks.
+ *
+ * flush_dcache_range(unsigned long start, unsigned long stop)
+ * r3 = start, r4 = stop (exclusive); range is widened to cache lines.
+ */
+_GLOBAL(flush_dcache_range)
+       li      r5,L1_CACHE_BYTES-1
+       andc    r3,r3,r5        /* round start down to a line boundary */
+       subf    r4,r3,r4
+       add     r4,r4,r5
+       srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines, rounded up */
+       beqlr
+       mtctr   r4
+
+1:     dcbf    0,r3            /* flush-and-invalidate, unlike dcbst above */
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    1b
+       sync                            /* wait for dcbst's to get to ram */
+       blr
+
+/*
+ * Like above, but invalidate the D-cache.  This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ *
+ * invalidate_dcache_range(unsigned long start, unsigned long stop)
+ * r3 = start, r4 = stop (exclusive).  Dirty data in the range is
+ * discarded, not written back (dcbi).
+ */
+_GLOBAL(invalidate_dcache_range)
+       li      r5,L1_CACHE_BYTES-1
+       andc    r3,r3,r5        /* round start down to a line boundary */
+       subf    r4,r3,r4
+       add     r4,r4,r5
+       srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines, rounded up */
+       beqlr
+       mtctr   r4
+
+1:     dcbi    0,r3
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    1b
+       sync                            /* wait for dcbi's to get to ram */
+       blr
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * 40x cores have 8K or 16K dcache and 32 byte line size.
+ * 44x has a 32K dcache and 32 byte line size.
+ * 8xx has 1, 2, 4, 8K variants.
+ * For now, cover the worst case of the 44x.
+ * Must be called with external interrupts disabled.
+ *
+ * Flushes by displacement: loading 2x the cache size of kernel text
+ * evicts (and thus writes back) every dirty line.
+ */
+#define CACHE_NWAYS    64
+#define CACHE_NLINES   16
+
+_GLOBAL(flush_dcache_all)
+       li      r4, (2 * CACHE_NWAYS * CACHE_NLINES)
+       mtctr   r4
+       lis     r5, KERNELBASE@h
+1:     lwz     r3, 0(r5)               /* Load one word from every line */
+       addi    r5, r5, L1_CACHE_BYTES
+       bdnz    1b
+       blr
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+
+/*
+ * Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ * This is a no-op on the 601 which has a unified cache.
+ *
+ *     void __flush_dcache_icache(void *page)
+ * r3 = kernel virtual address of the page.
+ */
+_GLOBAL(__flush_dcache_icache)
+BEGIN_FTR_SECTION
+       blr                                     /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+       rlwinm  r3,r3,0,0,19                    /* Get page base address */
+       li      r4,4096/L1_CACHE_BYTES  /* Number of lines in a page */
+       mtctr   r4
+       mr      r6,r3
+0:     dcbst   0,r3                            /* Write line to ram */
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    0b
+       sync
+       mtctr   r4
+1:     icbi    0,r6                            /* Invalidate I-cache line */
+       addi    r6,r6,L1_CACHE_BYTES
+       bdnz    1b
+       sync
+       isync
+       blr
+
+/*
+ * Flush a particular page from the data cache to RAM, identified
+ * by its physical address.  We turn off the MMU so we can just use
+ * the physical address (this may be a highmem page without a kernel
+ * mapping).
+ *
+ *     void __flush_dcache_icache_phys(unsigned long physaddr)
+ */
+_GLOBAL(__flush_dcache_icache_phys)
+BEGIN_FTR_SECTION
+       blr                                     /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+       mfmsr   r10
+       rlwinm  r0,r10,0,28,26                  /* clear DR */
+       mtmsr   r0
+       isync
+       rlwinm  r3,r3,0,0,19                    /* Get page base address */
+       li      r4,4096/L1_CACHE_BYTES  /* Number of lines in a page */
+       mtctr   r4
+       mr      r6,r3
+0:     dcbst   0,r3                            /* Write line to ram */
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    0b
+       sync
+       mtctr   r4
+1:     icbi    0,r6                            /* Invalidate I-cache line */
+       addi    r6,r6,L1_CACHE_BYTES
+       bdnz    1b
+       sync
+       mtmsr   r10                             /* restore DR */
+       isync
+       blr
+
+/*
+ * Clear pages using the dcbz instruction, which doesn't cause any
+ * memory traffic (except to write out any cache lines which get
+ * displaced).  This only works on cacheable memory.
+ *
+ * void clear_pages(void *page, int order) ;
+ * r3 = page address, r4 = order (clears 2^order pages).
+ */
+_GLOBAL(clear_pages)
+       li      r0,4096/L1_CACHE_BYTES
+       slw     r0,r0,r4        /* total line count = lines/page << order */
+       mtctr   r0
+#ifdef CONFIG_8xx
+       /* 8xx: dcbz is avoided; store zeros 16 bytes per iteration */
+       li      r4, 0
+1:     stw     r4, 0(r3)
+       stw     r4, 4(r3)
+       stw     r4, 8(r3)
+       stw     r4, 12(r3)
+#else
+1:     dcbz    0,r3
+#endif
+       addi    r3,r3,L1_CACHE_BYTES
+       bdnz    1b
+       blr
+
+/*
+ * Copy a whole page.  We use the dcbz instruction on the destination
+ * to reduce memory traffic (it eliminates the unnecessary reads of
+ * the destination into cache).  This requires that the destination
+ * is cacheable.
+ * r3 = destination page, r4 = source page.
+ */
+/* Copy one 16-byte chunk; r4/r3 advance via the final update forms */
+#define COPY_16_BYTES          \
+       lwz     r6,4(r4);       \
+       lwz     r7,8(r4);       \
+       lwz     r8,12(r4);      \
+       lwzu    r9,16(r4);      \
+       stw     r6,4(r3);       \
+       stw     r7,8(r3);       \
+       stw     r8,12(r3);      \
+       stwu    r9,16(r3)
+
+_GLOBAL(copy_page)
+       /* Bias pointers by -4 so the 4(rX)..16(rX) offsets above line up */
+       addi    r3,r3,-4
+       addi    r4,r4,-4
+
+#ifdef CONFIG_8xx
+       /* don't use prefetch on 8xx */
+       li      r0,4096/L1_CACHE_BYTES
+       mtctr   r0
+1:     COPY_16_BYTES
+       bdnz    1b
+       blr
+
+#else  /* not 8xx, we can prefetch */
+       li      r5,4
+
+       /* Prime the prefetch queue with MAX_COPY_PREFETCH lines of source */
+#if MAX_COPY_PREFETCH > 1
+       li      r0,MAX_COPY_PREFETCH
+       li      r11,4
+       mtctr   r0
+11:    dcbt    r11,r4
+       addi    r11,r11,L1_CACHE_BYTES
+       bdnz    11b
+#else /* MAX_COPY_PREFETCH == 1 */
+       dcbt    r5,r4
+       li      r11,L1_CACHE_BYTES+4
+#endif /* MAX_COPY_PREFETCH */
+       li      r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+       crclr   4*cr0+eq
+2:
+       mtctr   r0
+1:
+       dcbt    r11,r4          /* prefetch a source line ahead */
+       dcbz    r5,r3           /* zero the destination line (no read) */
+       COPY_16_BYTES
+#if L1_CACHE_BYTES >= 32
+       COPY_16_BYTES
+#if L1_CACHE_BYTES >= 64
+       COPY_16_BYTES
+       COPY_16_BYTES
+#if L1_CACHE_BYTES >= 128
+       COPY_16_BYTES
+       COPY_16_BYTES
+       COPY_16_BYTES
+       COPY_16_BYTES
+#endif
+#endif
+#endif
+       bdnz    1b
+       beqlr
+       /* Second pass: copy the final MAX_COPY_PREFETCH lines (no prefetch) */
+       crnot   4*cr0+eq,4*cr0+eq
+       li      r0,MAX_COPY_PREFETCH
+       li      r11,4
+       b       2b
+#endif /* CONFIG_8xx */
+
+/*
+ * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
+ * void atomic_set_mask(atomic_t mask, atomic_t *addr);
+ * Atomically clear/set the bits of r3 in the word at r4 using a
+ * lwarx/stwcx. retry loop.
+ */
+_GLOBAL(atomic_clear_mask)
+10:    lwarx   r5,0,r4
+       andc    r5,r5,r3
+       PPC405_ERR77(0,r4)      /* workaround for PPC405 erratum 77 */
+       stwcx.  r5,0,r4
+       bne-    10b             /* reservation lost: retry */
+       blr
+_GLOBAL(atomic_set_mask)
+10:    lwarx   r5,0,r4
+       or      r5,r5,r3
+       PPC405_ERR77(0,r4)      /* workaround for PPC405 erratum 77 */
+       stwcx.  r5,0,r4
+       bne-    10b             /* reservation lost: retry */
+       blr
+
+/*
+ * I/O string operations
+ *
+ * insb(port, buf, len)
+ * outsb(port, buf, len)
+ * insw(port, buf, len)
+ * outsw(port, buf, len)
+ * insl(port, buf, len)
+ * outsl(port, buf, len)
+ * insw_ns(port, buf, len)
+ * outsw_ns(port, buf, len)
+ * insl_ns(port, buf, len)
+ * outsl_ns(port, buf, len)
+ *
+ * The *_ns versions don't do byte-swapping.
+ *
+ * Common pattern: r3 = port address, r4 = buffer, r5 = count.
+ * Each routine pre-decrements r4 by the element size so the
+ * update-form store/load can use a fixed positive offset, returns
+ * immediately when count <= 0 (blelr-), and places an eieio next to
+ * the port access to order the I/O operations.
+ */
+_GLOBAL(_insb)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,1
+       blelr-
+00:    lbz     r5,0(r3)
+       eieio
+       stbu    r5,1(r4)
+       bdnz    00b
+       blr
+
+_GLOBAL(_outsb)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,1
+       blelr-
+00:    lbzu    r5,1(r4)
+       stb     r5,0(r3)
+       eieio
+       bdnz    00b
+       blr
+
+_GLOBAL(_insw)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhbrx   r5,0,r3         /* byte-reversed load (little-endian port) */
+       eieio
+       sthu    r5,2(r4)
+       bdnz    00b
+       blr
+
+_GLOBAL(_outsw)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhzu    r5,2(r4)
+       eieio
+       sthbrx  r5,0,r3         /* byte-reversed store (little-endian port) */
+       bdnz    00b
+       blr
+
+_GLOBAL(_insl)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwbrx   r5,0,r3         /* byte-reversed load (little-endian port) */
+       eieio
+       stwu    r5,4(r4)
+       bdnz    00b
+       blr
+
+_GLOBAL(_outsl)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwzu    r5,4(r4)
+       stwbrx  r5,0,r3         /* byte-reversed store (little-endian port) */
+       eieio
+       bdnz    00b
+       blr
+
+_GLOBAL(__ide_mm_insw)
+_GLOBAL(_insw_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhz     r5,0(r3)        /* native byte order: no swap */
+       eieio
+       sthu    r5,2(r4)
+       bdnz    00b
+       blr
+
+_GLOBAL(__ide_mm_outsw)
+_GLOBAL(_outsw_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhzu    r5,2(r4)
+       sth     r5,0(r3)        /* native byte order: no swap */
+       eieio
+       bdnz    00b
+       blr
+
+_GLOBAL(__ide_mm_insl)
+_GLOBAL(_insl_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwz     r5,0(r3)        /* native byte order: no swap */
+       eieio
+       stwu    r5,4(r4)
+       bdnz    00b
+       blr
+
+_GLOBAL(__ide_mm_outsl)
+_GLOBAL(_outsl_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwzu    r5,4(r4)
+       stw     r5,0(r3)        /* native byte order: no swap */
+       eieio
+       bdnz    00b
+       blr
+
+/*
+ * Extended precision shifts.
+ *
+ * Updated to be valid for shift counts from 0 to 63 inclusive.
+ * -- Gabriel
+ *
+ * R3/R4 has 64 bit value
+ * R5    has shift count
+ * result in R3/R4
+ *
+ *  ashrdi3: arithmetic right shift (sign propagation) 
+ *  lshrdi3: logical right shift
+ *  ashldi3: left shift
+ */
+_GLOBAL(__ashrdi3)
+       subfic  r6,r5,32        # r6 = 32 - count
+       srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
+       addi    r7,r5,32        # could be xori, or addi with -32
+       slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
+       rlwinm  r8,r7,0,32      # t3 = (count < 32) ? 32 : 0
+       sraw    r7,r3,r7        # t2 = MSW >> (count-32)
+       or      r4,r4,r6        # LSW |= t1
+       slw     r7,r7,r8        # t2 = (count < 32) ? 0 : t2
+       sraw    r3,r3,r5        # MSW = MSW >> count
+       or      r4,r4,r7        # LSW |= t2
+       blr
+
+_GLOBAL(__ashldi3)
+       subfic  r6,r5,32        # r6 = 32 - count
+       slw     r3,r3,r5        # MSW = count > 31 ? 0 : MSW << count
+       addi    r7,r5,32        # could be xori, or addi with -32
+       srw     r6,r4,r6        # t1 = count > 31 ? 0 : LSW >> (32-count)
+       slw     r7,r4,r7        # t2 = count < 32 ? 0 : LSW << (count-32)
+       or      r3,r3,r6        # MSW |= t1
+       slw     r4,r4,r5        # LSW = LSW << count
+       or      r3,r3,r7        # MSW |= t2
+       blr
+
+_GLOBAL(__lshrdi3)
+       subfic  r6,r5,32        # r6 = 32 - count
+       srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
+       addi    r7,r5,32        # could be xori, or addi with -32
+       slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
+       srw     r7,r3,r7        # t2 = count < 32 ? 0 : MSW >> (count-32)
+       or      r4,r4,r6        # LSW |= t1
+       srw     r3,r3,r5        # MSW = MSW >> count
+       or      r4,r4,r7        # LSW |= t2
+       blr
+
+_GLOBAL(abs)
+       srawi   r4,r3,31        # r4 = (r3 < 0) ? -1 : 0
+       xor     r3,r3,r4        # complement if negative ...
+       sub     r3,r3,r4        # ... then add 1: two's-complement negate
+       blr
+
+_GLOBAL(_get_SP)
+       mr      r3,r1           /* Close enough */
+       blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+#ifdef CONFIG_PPC_FPU
+_GLOBAL(cvt_fd)
+       lfd     0,-4(r5)        /* load up fpscr value */
+       mtfsf   0xff,0          /* install the task's FPSCR */
+       lfs     0,0(r3)         /* load single from *r3 ... */
+       stfd    0,0(r4)         /* ... store it as double to *r4 */
+       mffs    0               /* save new fpscr value */
+       stfd    0,-4(r5)
+       blr
+
+_GLOBAL(cvt_df)
+       lfd     0,-4(r5)        /* load up fpscr value */
+       mtfsf   0xff,0          /* install the task's FPSCR */
+       lfd     0,0(r3)         /* load double from *r3 ... */
+       stfs    0,0(r4)         /* ... store it as single to *r4 */
+       mffs    0               /* save new fpscr value */
+       stfd    0,-4(r5)
+       blr
+#endif
+
+/*
+ * Create a kernel thread
+ *   kernel_thread(fn, arg, flags)
+ */
+_GLOBAL(kernel_thread)
+       stwu    r1,-16(r1)      /* frame to preserve r30/r31 */
+       stw     r30,8(r1)
+       stw     r31,12(r1)
+       mr      r30,r3          /* function */
+       mr      r31,r4          /* argument */
+       ori     r3,r5,CLONE_VM  /* flags */
+       oris    r3,r3,CLONE_UNTRACED>>16
+       li      r4,0            /* new sp (unused) */
+       li      r0,__NR_clone
+       sc                      /* clone syscall */
+       cmpwi   0,r3,0          /* parent or child? */
+       bne     1f              /* return if parent */
+       li      r0,0            /* make top-level stack frame */
+       stwu    r0,-16(r1)
+       mtlr    r30             /* fn addr in lr */
+       mr      r3,r31          /* load arg and call fn */
+       PPC440EP_ERR42          /* NOTE(review): presumably 440EP erratum workaround -- see ppc_asm.h */
+       blrl
+       li      r0,__NR_exit    /* exit if function returns */
+       li      r3,0
+       sc
+1:     lwz     r30,8(r1)       /* parent: restore saved regs and return */
+       lwz     r31,12(r1)
+       addi    r1,r1,16
+       blr
+
+_GLOBAL(execve)
+       li      r0,__NR_execve
+       sc                      /* execve syscall */
+       bnslr                   /* SO clear: success, return r3 as-is */
+       neg     r3,r3           /* error path: return negated errno */
+       blr
+
+/*
+ * This routine is just here to keep GCC happy - sigh...
+ */
+_GLOBAL(__main)
+       blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644 (file)
index 0000000..4775bed
--- /dev/null
@@ -0,0 +1,899 @@
+/*
+ *  arch/powerpc/kernel/misc64.S
+ *
+ * This file contains miscellaneous low-level functions.
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+
+       .text
+
+/*
+ * Returns (address we are running at) - (address we were linked at)
+ * for use before the text and data are mapped to KERNELBASE.
+ */
+
+_GLOBAL(reloc_offset)
+       mflr    r0              /* preserve LR across the bl below */
+       bl      1f
+1:     mflr    r3              /* r3 = runtime address of label 1 */
+       LOADADDR(r4,1b)         /* r4 = link-time address of label 1 */
+       subf    r3,r4,r3        /* difference is the relocation offset */
+       mtlr    r0
+       blr
+
+/*
+ * add_reloc_offset(x) returns x + reloc_offset().
+ */
+_GLOBAL(add_reloc_offset)
+       mflr    r0              /* preserve LR across the bl below */
+       bl      1f
+1:     mflr    r5              /* runtime address of label 1 */
+       LOADADDR(r4,1b)         /* link-time address of label 1 */
+       subf    r5,r4,r5        /* r5 = reloc offset */
+       add     r3,r3,r5        /* return x + reloc_offset() */
+       mtlr    r0
+       blr
+
+_GLOBAL(get_msr)
+       mfmsr   r3              /* return current MSR */
+       blr
+
+_GLOBAL(get_dar)
+       mfdar   r3              /* return DAR */
+       blr
+
+_GLOBAL(get_srr0)
+       mfsrr0  r3              /* return SRR0 */
+       blr
+
+_GLOBAL(get_srr1)
+       mfsrr1  r3              /* return SRR1 */
+       blr
+       
+_GLOBAL(get_sp)
+       mr      r3,r1           /* return the current stack pointer */
+       blr
+
+#ifdef CONFIG_IRQSTACKS
+_GLOBAL(call_do_softirq)
+       mflr    r0
+       std     r0,16(r1)               /* save LR in our frame */
+       stdu    r1,THREAD_SIZE-112(r3)  /* store old SP near top of new stack (r3) */
+       mr      r1,r3                   /* switch to the softirq stack */
+       bl      .__do_softirq
+       ld      r1,0(r1)                /* back-chain word restores the old SP */
+       ld      r0,16(r1)
+       mtlr    r0
+       blr
+
+_GLOBAL(call_handle_IRQ_event)
+       mflr    r0
+       std     r0,16(r1)               /* save LR in our frame */
+       stdu    r1,THREAD_SIZE-112(r6)  /* store old SP near top of new stack (r6) */
+       mr      r1,r6                   /* switch to the irq stack */
+       bl      .handle_IRQ_event
+       ld      r1,0(r1)                /* back-chain word restores the old SP */
+       ld      r0,16(r1)
+       mtlr    r0
+       blr
+#endif /* CONFIG_IRQSTACKS */
+
+/*
+ * To be called by C code which needs to do some operations with MMU
+ * disabled. Note that interrupts have to be disabled by the caller
+ * prior to calling us. The code called _MUST_ be in the RMO of course
+ * and part of the linear mapping as we don't attempt to translate the
+ * stack pointer at all. The function is called with the stack switched
+ * to this CPU emergency stack
+ *
+ * prototype is void *call_with_mmu_off(void *func, void *data);
+ *
+ * the called function is expected to be of the form
+ *
+ * void *called(void *data); 
+ */
+_GLOBAL(call_with_mmu_off)
+       mflr    r0                      /* get link, save it on stackframe */
+       std     r0,16(r1)
+       mr      r5,r1                   /* save old stack ptr (was mr r1,r5: operands swapped) */
+       ld      r1,PACAEMERGSP(r13)     /* get emerg. stack */
+       subi    r1,r1,STACK_FRAME_OVERHEAD
+       std     r0,16(r1)               /* save link on emerg. stack */
+       std     r5,0(r1)                /* save old stack ptr in backchain */
+       ld      r3,0(r3)                /* get to real function ptr (assume same TOC) */
+       bl      2f                      /* we need LR to return, continue at label 2 */
+
+       ld      r0,16(r1)               /* we return here from the call, get LR and */
+       ld      r1,0(r1)                /* .. old stack ptr */
+       mtspr   SPRN_SRR0,r0            /* and get back to virtual mode with these */
+       mfmsr   r4
+       ori     r4,r4,MSR_IR|MSR_DR     /* translation back on */
+       mtspr   SPRN_SRR1,r4
+       rfid
+
+2:     mtspr   SPRN_SRR0,r3            /* coming from above, enter real mode */
+       mr      r3,r4                   /* get parameter */
+       mfmsr   r0
+       ori     r0,r0,MSR_IR|MSR_DR
+       xori    r0,r0,MSR_IR|MSR_DR     /* clear IR/DR: translation off */
+       mtspr   SPRN_SRR1,r0
+       rfid
+
+
+       .section        ".toc","aw"
+PPC64_CACHES:
+       .tc             ppc64_caches[TC],ppc64_caches
+       .section        ".text"
+
+/*
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ *
+ * flush_icache_range(unsigned long start, unsigned long stop)
+ *
+ *   flush all bytes from start through stop-1 inclusive
+ */
+
+_KPROBE(__flush_icache_range)
+
+/*
+ * Flush the data cache to memory 
+ * 
+ * Different systems have different cache line sizes
+ * and in some cases i-cache and d-cache line sizes differ from
+ * each other.
+ */
+       ld      r10,PPC64_CACHES@toc(r2)
+       lwz     r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
+       addi    r5,r7,-1
+       andc    r6,r3,r5                /* round low to line bdy */
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5                /* ensure we get enough */
+       lwz     r9,DCACHEL1LOGLINESIZE(r10)     /* Get log-2 of cache line size */
+       srw.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       mtctr   r8
+1:     dcbst   0,r6                    /* write the line to memory */
+       add     r6,r6,r7
+       bdnz    1b
+       sync                            /* wait for the dcbst stores */
+
+/* Now invalidate the instruction cache */
+       
+       lwz     r7,ICACHEL1LINESIZE(r10)        /* Get Icache line size */
+       addi    r5,r7,-1
+       andc    r6,r3,r5                /* round low to line bdy */
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5
+       lwz     r9,ICACHEL1LOGLINESIZE(r10)     /* Get log-2 of Icache line size */
+       srw.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       mtctr   r8
+2:     icbi    0,r6                    /* invalidate the icache line */
+       add     r6,r6,r7
+       bdnz    2b
+       isync                           /* discard prefetched instructions */
+       blr
+       .previous .text
+/*
+ * Like above, but only do the D-cache.
+ *
+ * flush_dcache_range(unsigned long start, unsigned long stop)
+ *
+ *    flush all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(flush_dcache_range)
+
+/*
+ * Flush the data cache to memory 
+ * 
+ * Different systems have different cache line sizes
+ */
+       ld      r10,PPC64_CACHES@toc(r2)
+       lwz     r7,DCACHEL1LINESIZE(r10)        /* Get dcache line size */
+       addi    r5,r7,-1
+       andc    r6,r3,r5                /* round low to line bdy */
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5                /* ensure we get enough */
+       lwz     r9,DCACHEL1LOGLINESIZE(r10)     /* Get log-2 of dcache line size */
+       srw.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       mtctr   r8
+0:     dcbst   0,r6                    /* write the line to memory */
+       add     r6,r6,r7
+       bdnz    0b
+       sync                            /* wait for the dcbst stores */
+       blr
+
+/*
+ * Like above, but works on non-mapped physical addresses.
+ * Use only for non-LPAR setups ! It also assumes real mode
+ * is cacheable. Used for flushing out the DART before using
+ * it as uncacheable memory 
+ *
+ * flush_dcache_phys_range(unsigned long start, unsigned long stop)
+ *
+ *    flush all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(flush_dcache_phys_range)
+       ld      r10,PPC64_CACHES@toc(r2)
+       lwz     r7,DCACHEL1LINESIZE(r10)        /* Get dcache line size */
+       addi    r5,r7,-1
+       andc    r6,r3,r5                /* round low to line bdy */
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5                /* ensure we get enough */
+       lwz     r9,DCACHEL1LOGLINESIZE(r10)     /* Get log-2 of dcache line size */
+       srw.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       mfmsr   r5                      /* Disable MMU Data Relocation */
+       ori     r0,r5,MSR_DR            /* set then flip MSR_DR ... */
+       xori    r0,r0,MSR_DR            /* ... so r0 = MSR with DR clear */
+       sync
+       mtmsr   r0
+       sync
+       isync
+       mtctr   r8
+0:     dcbst   0,r6                    /* flush line at physical addr r6 */
+       add     r6,r6,r7
+       bdnz    0b
+       sync
+       isync
+       mtmsr   r5                      /* Re-enable MMU Data Relocation */
+       sync
+       isync
+       blr
+
+_GLOBAL(flush_inval_dcache_range)
+       ld      r10,PPC64_CACHES@toc(r2)
+       lwz     r7,DCACHEL1LINESIZE(r10)        /* Get dcache line size */
+       addi    r5,r7,-1
+       andc    r6,r3,r5                /* round low to line bdy */
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5                /* ensure we get enough */
+       lwz     r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
+       srw.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       sync
+       isync
+       mtctr   r8
+0:     dcbf    0,r6                    /* flush + invalidate the line */
+       add     r6,r6,r7
+       bdnz    0b
+       sync
+       isync
+       blr
+
+
+/*
+ * Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ *     void __flush_dcache_icache(void *page)
+ */
+_GLOBAL(__flush_dcache_icache)
+/*
+ * Flush the data cache to memory 
+ * 
+ * Different systems have different cache line sizes
+ */
+
+/* Flush the dcache */
+       ld      r7,PPC64_CACHES@toc(r2)
+       clrrdi  r3,r3,PAGE_SHIFT                    /* Page align */
+       lwz     r4,DCACHEL1LINESPERPAGE(r7)     /* Get # dcache lines per page */
+       lwz     r5,DCACHEL1LINESIZE(r7)         /* Get dcache line size */
+       mr      r6,r3                           /* keep page base in r3 for icache pass */
+       mtctr   r4
+0:     dcbst   0,r6                            /* write dcache line to memory */
+       add     r6,r6,r5
+       bdnz    0b
+       sync                                    /* wait for the dcbst stores */
+
+/* Now invalidate the icache */        
+
+       lwz     r4,ICACHEL1LINESPERPAGE(r7)     /* Get # icache lines per page */
+       lwz     r5,ICACHEL1LINESIZE(r7)         /* Get icache line size */
+       mtctr   r4
+1:     icbi    0,r3                            /* invalidate icache line */
+       add     r3,r3,r5
+       bdnz    1b
+       isync                                   /* discard prefetched instructions */
+       blr
+       
+/*
+ * I/O string operations
+ *
+ * insb(port, buf, len)
+ * outsb(port, buf, len)
+ * insw(port, buf, len)
+ * outsw(port, buf, len)
+ * insl(port, buf, len)
+ * outsl(port, buf, len)
+ * insw_ns(port, buf, len)
+ * outsw_ns(port, buf, len)
+ * insl_ns(port, buf, len)
+ * outsl_ns(port, buf, len)
+ *
+ * The *_ns versions don't do byte-swapping.
+ */
+_GLOBAL(_insb)
+       cmpwi   0,r5,0          /* any bytes to transfer? */
+       mtctr   r5              /* CTR = element count for bdnz */
+       subi    r4,r4,1         /* bias buf for the update-form store */
+       blelr-                  /* return immediately if count <= 0 */
+00:    lbz     r5,0(r3)        /* read one byte from the port */
+       eieio
+       stbu    r5,1(r4)
+       bdnz    00b
+       twi     0,r5,0          /* never-trapping twi consumes the load data ... */
+       isync                   /* ... so isync waits for the last I/O load */
+       blr
+
+_GLOBAL(_outsb)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,1
+       blelr-
+00:    lbzu    r5,1(r4)
+       stb     r5,0(r3)        /* write the byte to the port */
+       bdnz    00b
+       sync                    /* make sure all stores have completed */
+       blr
+
+_GLOBAL(_insw)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2         /* element size is 2 bytes here */
+       blelr-
+00:    lhbrx   r5,0,r3         /* byte-reversed halfword load from port */
+       eieio
+       sthu    r5,2(r4)
+       bdnz    00b
+       twi     0,r5,0          /* twi+isync: wait for the last I/O load */
+       isync
+       blr
+
+_GLOBAL(_outsw)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhzu    r5,2(r4)
+       sthbrx  r5,0,r3         /* byte-reversed halfword store to port */
+       bdnz    00b
+       sync                    /* make sure all stores have completed */
+       blr
+
+_GLOBAL(_insl)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4         /* element size is 4 bytes here */
+       blelr-
+00:    lwbrx   r5,0,r3         /* byte-reversed word load from port */
+       eieio
+       stwu    r5,4(r4)
+       bdnz    00b
+       twi     0,r5,0          /* twi+isync: wait for the last I/O load */
+       isync
+       blr
+
+_GLOBAL(_outsl)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwzu    r5,4(r4)
+       stwbrx  r5,0,r3         /* byte-reversed word store to port */
+       bdnz    00b
+       sync                    /* make sure all stores have completed */
+       blr
+
+/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
+_GLOBAL(_insw_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhz     r5,0(r3)        /* native-order halfword load */
+       eieio
+       sthu    r5,2(r4)
+       bdnz    00b
+       twi     0,r5,0          /* twi+isync: wait for the last I/O load */
+       isync
+       blr
+
+/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
+_GLOBAL(_outsw_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,2
+       blelr-
+00:    lhzu    r5,2(r4)
+       sth     r5,0(r3)        /* native-order halfword store */
+       bdnz    00b
+       sync                    /* make sure all stores have completed */
+       blr
+
+_GLOBAL(_insl_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwz     r5,0(r3)        /* native-order word load */
+       eieio
+       stwu    r5,4(r4)
+       bdnz    00b
+       twi     0,r5,0          /* twi+isync: wait for the last I/O load */
+       isync
+       blr
+
+_GLOBAL(_outsl_ns)
+       cmpwi   0,r5,0
+       mtctr   r5
+       subi    r4,r4,4
+       blelr-
+00:    lwzu    r5,4(r4)
+       stw     r5,0(r3)        /* native-order word store */
+       bdnz    00b
+       sync                    /* make sure all stores have completed */
+       blr
+
+
+_GLOBAL(cvt_fd)
+       lfd     0,0(r5)         /* load up fpscr value */
+       mtfsf   0xff,0          /* install the task's FPSCR */
+       lfs     0,0(r3)         /* load single from *r3 ... */
+       stfd    0,0(r4)         /* ... store it as double to *r4 */
+       mffs    0               /* save new fpscr value */
+       stfd    0,0(r5)
+       blr
+
+_GLOBAL(cvt_df)
+       lfd     0,0(r5)         /* load up fpscr value */
+       mtfsf   0xff,0          /* install the task's FPSCR */
+       lfd     0,0(r3)         /* load double from *r3 ... */
+       stfs    0,0(r4)         /* ... store it as single to *r4 */
+       mffs    0               /* save new fpscr value */
+       stfd    0,0(r5)
+       blr
+
+/*
+ * identify_cpu and calls setup_cpu
+ * In: r3 = base of the cpu_specs array
+ *     r4 = address of cur_cpu_spec
+ *     r5 = relocation offset
+ */
+_GLOBAL(identify_cpu)
+       mfpvr   r7                      /* r7 = processor version register */
+1:
+       lwz     r8,CPU_SPEC_PVR_MASK(r3)
+       and     r8,r8,r7                /* mask the PVR */
+       lwz     r9,CPU_SPEC_PVR_VALUE(r3)
+       cmplw   0,r9,r8                 /* does this entry match? */
+       beq     1f
+       addi    r3,r3,CPU_SPEC_ENTRY_SIZE       /* no: try the next entry */
+       b       1b
+1:
+       sub     r0,r3,r5                /* unrelocated address of matched entry */
+       std     r0,0(r4)                /* *cur_cpu_spec = matched entry */
+       ld      r4,CPU_SPEC_SETUP(r3)   /* setup function descriptor pointer */
+       add     r4,r4,r5
+       ld      r4,0(r4)                /* entry point from the descriptor */
+       add     r4,r4,r5
+       mtctr   r4
+       /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
+       mr      r4,r3
+       mr      r3,r5
+       bctr
+
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nop's over sections of code that don't apply for this cpu.
+ * r3 = data offset (not changed)
+ */
+_GLOBAL(do_cpu_ftr_fixups)
+       /* Get CPU 0 features */
+       LOADADDR(r6,cur_cpu_spec)
+       sub     r6,r6,r3                /* adjust by reloc offset */
+       ld      r4,0(r6)
+       sub     r4,r4,r3
+       ld      r4,CPU_SPEC_FEATURES(r4)        /* r4 = feature bits */
+       /* Get the fixup table */
+       LOADADDR(r6,__start___ftr_fixup)
+       sub     r6,r6,r3
+       LOADADDR(r7,__stop___ftr_fixup)
+       sub     r7,r7,r3
+       /* Do the fixup */
+1:     cmpld   r6,r7                   /* end of table? */
+       bgelr
+       addi    r6,r6,32                /* each fixup entry is 4 doublewords */
+       ld      r8,-32(r6)      /* mask */
+       and     r8,r8,r4
+       ld      r9,-24(r6)      /* value */
+       cmpld   r8,r9
+       beq     1b                      /* feature present: leave code alone */
+       ld      r8,-16(r6)      /* section begin */
+       ld      r9,-8(r6)       /* section end */
+       subf.   r9,r8,r9
+       beq     1b                      /* empty section: nothing to patch */
+       /* write nops over the section of code */
+       /* todo: if large section, add a branch at the start of it */
+       srwi    r9,r9,2                 /* bytes -> instruction count */
+       mtctr   r9
+       sub     r8,r8,r3
+       lis     r0,0x60000000@h /* nop */
+3:     stw     r0,0(r8)
+       andi.   r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+       beq     2f
+       dcbst   0,r8            /* suboptimal, but simpler */
+       sync
+       icbi    0,r8
+2:     addi    r8,r8,4
+       bdnz    3b
+       sync                    /* additional sync needed on g4 */
+       isync
+       b       1b
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+/*
+ * Do an IO access in real mode
+ */
+_GLOBAL(real_readb)
+       mfmsr   r7
+       ori     r0,r7,MSR_DR            /* set then flip MSR_DR ... */
+       xori    r0,r0,MSR_DR            /* ... r0 = MSR with data relocation off */
+       sync
+       mtmsrd  r0
+       sync
+       isync
+       mfspr   r6,SPRN_HID4            /* save HID4 */
+       rldicl  r5,r6,32,0              /* rotate high/low halves */
+       ori     r5,r5,0x100             /* NOTE(review): HID4 bit tweak for real-mode access -- see CPU manual */
+       rldicl  r5,r5,32,0              /* rotate back */
+       sync
+       mtspr   SPRN_HID4,r5
+       isync
+       slbia                           /* invalidate the SLB */
+       isync
+       lbz     r3,0(r3)                /* the actual real-mode byte load */
+       sync
+       mtspr   SPRN_HID4,r6            /* restore HID4 */
+       isync
+       slbia
+       isync
+       mtmsrd  r7                      /* restore original MSR */
+       sync
+       isync
+       blr
+
+/*
+ * Do an IO access in real mode
+ */
+_GLOBAL(real_writeb)
+       mfmsr   r7
+       ori     r0,r7,MSR_DR            /* set then flip MSR_DR ... */
+       xori    r0,r0,MSR_DR            /* ... r0 = MSR with data relocation off */
+       sync
+       mtmsrd  r0
+       sync
+       isync
+       mfspr   r6,SPRN_HID4            /* save HID4 */
+       rldicl  r5,r6,32,0
+       ori     r5,r5,0x100
+       rldicl  r5,r5,32,0
+       sync
+       mtspr   SPRN_HID4,r5
+       isync
+       slbia                           /* invalidate the SLB */
+       isync
+       stb     r3,0(r4)                /* the actual real-mode byte store */
+       sync
+       mtspr   SPRN_HID4,r6            /* restore HID4 */
+       isync
+       slbia
+       isync
+       mtmsrd  r7                      /* restore original MSR */
+       sync
+       isync
+       blr
+#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
+
+/*
+ * Create a kernel thread
+ *   kernel_thread(fn, arg, flags)
+ */
+_GLOBAL(kernel_thread)
+       std     r29,-24(r1)             /* preserve r29/r30 below the frame */
+       std     r30,-16(r1)
+       stdu    r1,-STACK_FRAME_OVERHEAD(r1)
+       mr      r29,r3                  /* function descriptor */
+       mr      r30,r4                  /* argument */
+       ori     r3,r5,CLONE_VM  /* flags */
+       oris    r3,r3,(CLONE_UNTRACED>>16)
+       li      r4,0            /* new sp (unused) */
+       li      r0,__NR_clone
+       sc                              /* clone syscall */
+       cmpdi   0,r3,0          /* parent or child? */
+       bne     1f              /* return if parent */
+       li      r0,0
+       stdu    r0,-STACK_FRAME_OVERHEAD(r1)    /* top-level frame for the child */
+       ld      r2,8(r29)               /* TOC from the function descriptor */
+       ld      r29,0(r29)              /* entry point from the descriptor */
+       mtlr    r29              /* fn addr in lr */
+       mr      r3,r30          /* load arg and call fn */
+       blrl
+       li      r0,__NR_exit    /* exit after child exits */
+        li     r3,0
+       sc
+1:     addi    r1,r1,STACK_FRAME_OVERHEAD      
+       ld      r29,-24(r1)             /* parent: restore saved regs */
+       ld      r30,-16(r1)
+       blr
+
+/*
+ * disable_kernel_fp()
+ * Disable the FPU.
+ */
+_GLOBAL(disable_kernel_fp)
+       mfmsr   r3
+       rldicl  r0,r3,(63-MSR_FP_LG),1          /* rotate FP bit to the top and drop it ... */
+       rldicl  r3,r0,(MSR_FP_LG+1),0           /* ... rotate back: MSR with FP clear */
+       mtmsrd  r3                      /* disable use of fpu now */
+       isync
+       blr
+
+#ifdef CONFIG_ALTIVEC
+
+#if 0 /* this has no callers for now */
+/*
+ * disable_kernel_altivec()
+ * Disable the VMX.
+ */
+_GLOBAL(disable_kernel_altivec)
+       mfmsr   r3
+       rldicl  r0,r3,(63-MSR_VEC_LG),1         /* rotate VEC bit to the top and drop it ... */
+       rldicl  r3,r0,(MSR_VEC_LG+1),0          /* ... rotate back: MSR with VEC clear */
+       mtmsrd  r3                      /* disable use of VMX now */
+       isync
+       blr
+#endif /* 0 */
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+       mfmsr   r5
+       oris    r5,r5,MSR_VEC@h
+       mtmsrd  r5                      /* enable use of VMX now */
+       isync
+       cmpdi   0,r3,0
+       beqlr-                          /* if no previous owner, done */
+       addi    r3,r3,THREAD            /* want THREAD of task */
+       ld      r5,PT_REGS(r3)
+       cmpdi   0,r5,0                  /* does the task have pt_regs? */
+       SAVE_32VRS(0,r4,r3)             /* save all 32 vector regs to thread_struct */
+       mfvscr  vr0
+       li      r4,THREAD_VSCR
+       stvx    vr0,r4,r3               /* save VSCR too */
+       beq     1f
+       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r3,MSR_VEC@h
+       andc    r4,r4,r3                /* disable VMX for previous task */
+       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+       li      r5,0
+       ld      r4,last_task_used_altivec@got(r2)
+       std     r5,0(r4)                /* no lazy VMX owner any more */
+#endif /* CONFIG_SMP */
+       blr
+
+#endif /* CONFIG_ALTIVEC */
+
+_GLOBAL(__setup_cpu_power3)
+       blr                     /* no-op cpu setup routine */
+
+_GLOBAL(execve)
+       li      r0,__NR_execve
+       sc                      /* execve syscall */
+       bnslr                   /* SO clear: success, return r3 as-is */
+       neg     r3,r3           /* error path: return negated errno */
+       blr
+
+/* kexec_wait(phys_cpu)
+ *
+ * wait for the flag to change, indicating this kernel is going away but
+ * the slave code for the next one is at addresses 0 to 100.
+ *
+ * This is used by all slaves.
+ *
+ * Physical (hardware) cpu id should be in r3.
+ */
+_GLOBAL(kexec_wait)
+       bl      1f
+1:     mflr    r5                      /* r5 = runtime address of label 1 */
+       addi    r5,r5,kexec_flag-1b     /* r5 = address of kexec_flag */
+
+99:    HMT_LOW
+#ifdef CONFIG_KEXEC            /* use no memory without kexec */
+       lwz     r4,0(r5)                /* poll the flag */
+       cmpwi   0,r4,0
+       bnea    0x60                    /* flag set: branch absolute to slave entry at 0x60 */
+#endif
+       b       99b
+
+/* this can be in text because we won't change it until we are
+ * running in real mode anyway
+ */
+kexec_flag:
+       .long   0
+
+
+#ifdef CONFIG_KEXEC
+
+/* kexec_smp_wait(void)
+ *
+ * call with interrupts off
+ * note: this is a terminal routine, it does not save lr
+ *
+ * get phys id from paca
+ * set paca id to -1 to say we got here
+ * switch to real mode
+ * join other cpus in kexec_wait(phys_id)
+ */
+_GLOBAL(kexec_smp_wait)
+       lhz     r3,PACAHWCPUID(r13)     /* phys cpu id from our paca */
+       li      r4,-1
+       sth     r4,PACAHWCPUID(r13)     /* let others know we left */
+       bl      real_mode               /* turn the MMU off */
+       b       .kexec_wait             /* join the slave spin loop */
+
+/*
+ * switch to real mode (turn mmu off)
+ * we use the early kernel trick that the hardware ignores bits
+ * 0 and 1 (big endian) of the effective address in real mode
+ *
+ * don't overwrite r3 here, it is live for kexec_wait above.
+ */
+real_mode:     /* assume normal blr return */
+1:     li      r9,MSR_RI
+       li      r10,MSR_DR|MSR_IR
+       mflr    r11             /* return address to SRR0 */
+       mfmsr   r12
+       andc    r9,r12,r9       /* MSR with RI cleared */
+       andc    r10,r12,r10     /* MSR with DR/IR cleared */
+
+       mtmsrd  r9,1            /* drop RI before taking the rfid */
+       mtspr   SPRN_SRR1,r10
+       mtspr   SPRN_SRR0,r11
+       rfid                    /* "return" to caller in real mode */
+
+
+/*
+ * kexec_sequence(newstack, start, image, control, clear_all())
+ *
+ * does the grungy work with stack switching and real mode switches
+ * also does simple calls to other code
+ */
+
+_GLOBAL(kexec_sequence)
+       mflr    r0
+       std     r0,16(r1)
+
+       /* switch stacks to newstack -- &kexec_stack.stack */
+       stdu    r1,THREAD_SIZE-112(r3)
+       mr      r1,r3
+
+       li      r0,0
+       std     r0,16(r1)               /* no return address on the new stack */
+
+       /* save regs for local vars on new stack.
+        * yes, we won't go back, but ...
+        */
+       std     r31,-8(r1)
+       std     r30,-16(r1)
+       std     r29,-24(r1)
+       std     r28,-32(r1)
+       std     r27,-40(r1)
+       std     r26,-48(r1)
+       std     r25,-56(r1)
+
+       stdu    r1,-112-64(r1)          /* frame + room for the saves above */
+
+       /* save args into preserved regs */
+       mr      r31,r3                  /* newstack (both) */
+       mr      r30,r4                  /* start (real) */
+       mr      r29,r5                  /* image (virt) */
+       mr      r28,r6                  /* control, unused */
+       mr      r27,r7                  /* clear_all() fn desc */
+       mr      r26,r8                  /* spare */
+       lhz     r25,PACAHWCPUID(r13)    /* get our phys cpu from paca */
+
+       /* disable interrupts, we are overwriting kernel data next */
+       mfmsr   r3
+       rlwinm  r3,r3,0,17,15           /* clear MSR_EE (bit 16) */
+       mtmsrd  r3,1                    /* L=1 form: updates EE/RI only */
+
+       /* copy dest pages, flush whole dest image */
+       mr      r3,r29
+       bl      .kexec_copy_flush       /* (image) */
+
+       /* turn off mmu */
+       bl      real_mode
+
+       /* clear out hardware hash page table and tlb */
+       ld      r5,0(r27)               /* deref function descriptor */
+       mtctr   r5
+       bctrl                           /* ppc_md.hash_clear_all(void); */
+
+/*
+ *   kexec image calling is:
+ *      the first 0x100 bytes of the entry point are copied to 0
+ *
+ *      all slaves branch to slave = 0x60 (absolute)
+ *              slave(phys_cpu_id);
+ *
+ *      master goes to start = entry point
+ *              start(phys_cpu_id, start, 0);
+ *
+ *
+ *   a wrapper is needed to call existing kernels, here is an approximate
+ *   description of one method:
+ *
+ * v2: (2.6.10)
+ *   start will be near the boot_block (maybe 0x100 bytes before it?)
+ *   it will have a 0x60, which will b to boot_block, where it will wait
+ *   and 0 will store phys into struct boot-block and load r3 from there,
+ *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
+ *
+ * v1: (2.6.9)
+ *    boot block will have all cpus scanning device tree to see if they
+ *    are the boot cpu ?????
+ *    other device tree differences (prop sizes, va vs pa, etc)...
+ */
+
+       /* copy  0x100 bytes starting at start to 0 */
+       li      r3,0
+       mr      r4,r30
+       li      r5,0x100
+       li      r6,0
+       bl      .copy_and_flush /* (dest, src, copy limit, start offset) */
+1:     /* assume normal blr return */
+
+       /* release other cpus to the new kernel secondary start at 0x60 */
+       mflr    r5                      /* LR = address of label 1 above */
+       li      r6,1
+       stw     r6,kexec_flag-1b(5)     /* bare 5 is register r5 */
+       mr      r3,r25  # my phys cpu
+       mr      r4,r30  # start, aka phys mem offset
+       mtlr    4                       /* bare 4 is register r4: jump to start */
+       li      r5,0
+       blr     /* image->start(physid, image->start, 0); */
+#endif /* CONFIG_KEXEC */
similarity index 99%
rename from arch/ppc64/kernel/of_device.c
rename to arch/powerpc/kernel/of_device.c
index 9f200f0..7667188 100644 (file)
@@ -184,6 +184,7 @@ void of_release_dev(struct device *dev)
        struct of_device *ofdev;
 
         ofdev = to_of_device(dev);
+       of_node_put(ofdev->node);
        kfree(ofdev);
 }
 
@@ -244,7 +245,7 @@ struct of_device* of_platform_device_create(struct device_node *np,
                return NULL;
        memset(dev, 0, sizeof(*dev));
 
-       dev->node = np;
+       dev->node = of_node_get(np);
        dev->dma_mask = 0xffffffffUL;
        dev->dev.dma_mask = &dev->dma_mask;
        dev->dev.parent = parent;
@@ -261,7 +262,6 @@ struct of_device* of_platform_device_create(struct device_node *np,
        return dev;
 }
 
-
 EXPORT_SYMBOL(of_match_device);
 EXPORT_SYMBOL(of_platform_bus_type);
 EXPORT_SYMBOL(of_register_driver);
similarity index 73%
rename from arch/ppc64/kernel/pmc.c
rename to arch/powerpc/kernel/pmc.c
index 63d9481..2d333cc 100644 (file)
@@ -1,7 +1,10 @@
 /*
- *  linux/arch/ppc64/kernel/pmc.c
+ *  arch/powerpc/kernel/pmc.c
  *
  *  Copyright (C) 2004 David Gibson, IBM Corporation.
+ *  Includes code formerly from arch/ppc/kernel/perfmon.c:
+ *    Author: Andy Fleming
+ *    Copyright (c) 2004 Freescale Semiconductor, Inc
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
 #include <asm/processor.h>
 #include <asm/pmc.h>
 
+#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
+static void dummy_perf(struct pt_regs *regs)
+{
+       unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
+
+       pmgc0 &= ~PMGC0_PMIE;
+       mtpmr(PMRN_PMGC0, pmgc0);
+}
+#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
+
+#ifndef MMCR0_PMAO
+#define MMCR0_PMAO     0
+#endif
+
 /* Ensure exceptions are disabled */
 static void dummy_perf(struct pt_regs *regs)
 {
@@ -25,6 +42,11 @@ static void dummy_perf(struct pt_regs *regs)
        mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
        mtspr(SPRN_MMCR0, mmcr0);
 }
+#else
+static void dummy_perf(struct pt_regs *regs)
+{
+}
+#endif
 
 static DEFINE_SPINLOCK(pmc_owner_lock);
 static void *pmc_owner_caller; /* mostly for debugging */
@@ -66,11 +88,12 @@ void release_pmc_hardware(void)
 }
 EXPORT_SYMBOL_GPL(release_pmc_hardware);
 
+#ifdef CONFIG_PPC64
 void power4_enable_pmcs(void)
 {
        unsigned long hid0;
 
-       hid0 = mfspr(HID0);
+       hid0 = mfspr(SPRN_HID0);
        hid0 |= 1UL << (63 - 20);
 
        /* POWER4 requires the following sequence */
@@ -83,6 +106,7 @@ void power4_enable_pmcs(void)
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
-               "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
+               "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
                "memory");
 }
+#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
new file mode 100644 (file)
index 0000000..254bf9c
--- /dev/null
@@ -0,0 +1,280 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/elfcore.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/nvram.h>
+#include <linux/console.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+
+#include <asm/page.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/ide.h>
+#include <asm/atomic.h>
+#include <asm/checksum.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/pci-bridge.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/nvram.h>
+#include <asm/mmu_context.h>
+#include <asm/backlight.h>
+#include <asm/time.h>
+#include <asm/cputable.h>
+#include <asm/btext.h>
+#include <asm/div64.h>
+#include <asm/xmon.h>
+
+#ifdef  CONFIG_8xx
+#include <asm/commproc.h>
+#endif
+
+#ifdef CONFIG_PPC32
+extern void transfer_to_handler(void);
+extern void do_IRQ(struct pt_regs *regs);
+extern void machine_check_exception(struct pt_regs *regs);
+extern void alignment_exception(struct pt_regs *regs);
+extern void program_check_exception(struct pt_regs *regs);
+extern void single_step_exception(struct pt_regs *regs);
+extern int do_signal(sigset_t *, struct pt_regs *);
+extern int pmac_newworld;
+extern int sys_sigreturn(struct pt_regs *regs);
+
+EXPORT_SYMBOL(clear_pages);
+EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+EXPORT_SYMBOL(DMA_MODE_READ);
+EXPORT_SYMBOL(DMA_MODE_WRITE);
+EXPORT_SYMBOL(__div64_32);
+
+EXPORT_SYMBOL(do_signal);
+EXPORT_SYMBOL(transfer_to_handler);
+EXPORT_SYMBOL(do_IRQ);
+EXPORT_SYMBOL(machine_check_exception);
+EXPORT_SYMBOL(alignment_exception);
+EXPORT_SYMBOL(program_check_exception);
+EXPORT_SYMBOL(single_step_exception);
+EXPORT_SYMBOL(sys_sigreturn);
+#endif
+
+#if defined(CONFIG_PPC_PREP)
+EXPORT_SYMBOL(_prep_type);
+EXPORT_SYMBOL(ucSystemType);
+#endif
+
+#if !defined(__INLINE_BITOPS)
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+#endif /* __INLINE_BITOPS */
+
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strcasecmp);
+
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_tcpudp_magic);
+
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+
+EXPORT_SYMBOL(_insb);
+EXPORT_SYMBOL(_outsb);
+EXPORT_SYMBOL(_insw);
+EXPORT_SYMBOL(_outsw);
+EXPORT_SYMBOL(_insl);
+EXPORT_SYMBOL(_outsl);
+EXPORT_SYMBOL(_insw_ns);
+EXPORT_SYMBOL(_outsw_ns);
+EXPORT_SYMBOL(_insl_ns);
+EXPORT_SYMBOL(_outsl_ns);
+EXPORT_SYMBOL(ioremap);
+#ifdef CONFIG_44x
+EXPORT_SYMBOL(ioremap64);
+#endif
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+#ifdef CONFIG_PPC32
+EXPORT_SYMBOL(ioremap_bot);    /* aka VMALLOC_END */
+#endif
+
+#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
+EXPORT_SYMBOL(ppc_ide_md);
+#endif
+
+#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(isa_mem_base);
+EXPORT_SYMBOL(pci_dram_offset);
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_bus_io_base);
+EXPORT_SYMBOL(pci_bus_io_base_phys);
+EXPORT_SYMBOL(pci_bus_mem_base_phys);
+EXPORT_SYMBOL(pci_bus_to_hose);
+EXPORT_SYMBOL(pci_resource_to_bus);
+EXPORT_SYMBOL(pci_phys_to_bus);
+EXPORT_SYMBOL(pci_bus_to_phys);
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+EXPORT_SYMBOL(flush_dcache_all);
+#endif
+
+EXPORT_SYMBOL(start_thread);
+EXPORT_SYMBOL(kernel_thread);
+
+EXPORT_SYMBOL(giveup_fpu);
+#ifdef CONFIG_ALTIVEC
+EXPORT_SYMBOL(giveup_altivec);
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+EXPORT_SYMBOL(giveup_spe);
+#endif /* CONFIG_SPE */
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__flush_icache_range);
+#else
+EXPORT_SYMBOL(flush_instruction_cache);
+EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(_tlbie);
+#endif
+EXPORT_SYMBOL(flush_dcache_range);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(smp_call_function);
+#ifdef CONFIG_PPC32
+EXPORT_SYMBOL(smp_hw_index);
+#endif
+#endif
+
+#ifdef CONFIG_ADB
+EXPORT_SYMBOL(adb_request);
+EXPORT_SYMBOL(adb_register);
+EXPORT_SYMBOL(adb_unregister);
+EXPORT_SYMBOL(adb_poll);
+EXPORT_SYMBOL(adb_try_handler_change);
+#endif /* CONFIG_ADB */
+#ifdef CONFIG_ADB_CUDA
+EXPORT_SYMBOL(cuda_request);
+EXPORT_SYMBOL(cuda_poll);
+#endif /* CONFIG_ADB_CUDA */
+#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
+EXPORT_SYMBOL(_machine);
+#endif
+#ifdef CONFIG_PPC_PMAC
+EXPORT_SYMBOL(sys_ctrler);
+#endif
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(kd_mksound);
+#endif
+EXPORT_SYMBOL(to_tm);
+
+#ifdef CONFIG_PPC32
+long long __ashrdi3(long long, int);
+long long __ashldi3(long long, int);
+long long __lshrdi3(long long, int);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+#endif
+
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memchr);
+
+#if defined(CONFIG_FB_VGA16_MODULE)
+EXPORT_SYMBOL(screen_info);
+#endif
+
+#ifdef CONFIG_PPC32
+EXPORT_SYMBOL(pm_power_off);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(timer_interrupt);
+EXPORT_SYMBOL(irq_desc);
+EXPORT_SYMBOL(tb_ticks_per_jiffy);
+EXPORT_SYMBOL(console_drivers);
+EXPORT_SYMBOL(cacheable_memcpy);
+#endif
+
+#ifdef CONFIG_XMON
+EXPORT_SYMBOL(xmon);
+EXPORT_SYMBOL(xmon_printf);
+#endif
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+
+#ifdef  CONFIG_8xx
+EXPORT_SYMBOL(cpm_install_handler);
+EXPORT_SYMBOL(cpm_free_handler);
+#endif /* CONFIG_8xx */
+#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
+       defined(CONFIG_83xx)
+EXPORT_SYMBOL(__res);
+#endif
+
+#ifdef CONFIG_PPC32
+EXPORT_SYMBOL(next_mmu_context);
+EXPORT_SYMBOL(set_context);
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_32
+extern long mol_trampoline;
+EXPORT_SYMBOL(mol_trampoline); /* For MOL */
+EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
+EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
+#ifdef CONFIG_SMP
+extern int mmu_hash_lock;
+EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
+#endif /* CONFIG_SMP */
+extern long *intercept_table;
+EXPORT_SYMBOL(intercept_table);
+#endif /* CONFIG_PPC_STD_MMU_32 */
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+EXPORT_SYMBOL(__mtdcr);
+EXPORT_SYMBOL(__mfdcr);
+#endif
similarity index 61%
rename from arch/ppc64/kernel/process.c
rename to arch/powerpc/kernel/process.c
index 8870053..047da1a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/ppc64/kernel/process.c
+ *  arch/ppc/kernel/process.c
  *
  *  Derived from "arch/i386/kernel/process.c"
  *    Copyright (C) 1995  Linus Torvalds
@@ -7,7 +7,7 @@
  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
  *  Paul Mackerras (paulus@cs.anu.edu.au)
  *
- *  PowerPC version 
+ *  PowerPC version
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
  *  This program is free software; you can redistribute it and/or
@@ -17,7 +17,6 @@
  */
 
 #include <linux/config.h>
-#include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
+#include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/elf.h>
 #include <linux/init.h>
-#include <linux/init_task.h>
 #include <linux/prctl.h>
-#include <linux/ptrace.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
 #include <linux/kallsyms.h>
-#include <linux/interrupt.h>
+#include <linux/mqueue.h>
+#include <linux/hardirq.h>
 #include <linux/utsname.h>
 #include <linux/kprobes.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
-#include <asm/mmu_context.h>
 #include <asm/prom.h>
-#include <asm/ppcdebug.h>
-#include <asm/machdep.h>
-#include <asm/iSeries/HvCallHpt.h>
-#include <asm/cputable.h>
+#ifdef CONFIG_PPC64
 #include <asm/firmware.h>
-#include <asm/sections.h>
-#include <asm/tlbflush.h>
-#include <asm/time.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/time.h>
+#endif
+
+extern unsigned long _get_SP(void);
 
 #ifndef CONFIG_SMP
 struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_spe = NULL;
 #endif
 
 /*
@@ -121,7 +120,6 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 }
 
 #ifdef CONFIG_ALTIVEC
-
 void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
@@ -130,7 +128,7 @@ void enable_kernel_altivec(void)
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
-               giveup_altivec(NULL);   /* just enables FP for kernel */
+               giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
 #else
        giveup_altivec(last_task_used_altivec);
 #endif /* CONFIG_SMP */
@@ -161,9 +159,48 @@ int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
        memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
        return 1;
 }
-
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
+
+void enable_kernel_spe(void)
+{
+       WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+               giveup_spe(current);
+       else
+               giveup_spe(NULL);       /* just enable SPE for kernel - force */
+#else
+       giveup_spe(last_task_used_spe);
+#endif /* __SMP __ */
+}
+EXPORT_SYMBOL(enable_kernel_spe);
+
+void flush_spe_to_thread(struct task_struct *tsk)
+{
+       if (tsk->thread.regs) {
+               preempt_disable();
+               if (tsk->thread.regs->msr & MSR_SPE) {
+#ifdef CONFIG_SMP
+                       BUG_ON(tsk != current);
+#endif
+                       giveup_spe(current);
+               }
+               preempt_enable();
+       }
+}
+
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+{
+       flush_spe_to_thread(current);
+       /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+       return 1;
+}
+#endif /* CONFIG_SPE */
+
 static void set_dabr_spr(unsigned long val)
 {
        mtspr(SPRN_DABR, val);
@@ -173,24 +210,27 @@ int set_dabr(unsigned long dabr)
 {
        int ret = 0;
 
+#ifdef CONFIG_PPC64
        if (firmware_has_feature(FW_FEATURE_XDABR)) {
                /* We want to catch accesses from kernel and userspace */
                unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
                ret = plpar_set_xdabr(dabr, flags);
        } else if (firmware_has_feature(FW_FEATURE_DABR)) {
                ret = plpar_set_dabr(dabr);
-       } else {
+       } else
+#endif
                set_dabr_spr(dabr);
-       }
 
        return ret;
 }
 
+#ifdef CONFIG_PPC64
 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
 static DEFINE_PER_CPU(unsigned long, current_dabr);
+#endif
 
 struct task_struct *__switch_to(struct task_struct *prev,
-                               struct task_struct *new)
+       struct task_struct *new)
 {
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
@@ -200,7 +240,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
-        * 
+        *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
@@ -209,31 +249,65 @@ struct task_struct *__switch_to(struct task_struct *prev,
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
 #ifdef CONFIG_ALTIVEC
+       /*
+        * If the previous thread used altivec in the last quantum
+        * (thus changing altivec regs) then save them.
+        * We used to check the VRSAVE register but not all apps
+        * set it, so we don't rely on it now (and in fact we need
+        * to save & restore VSCR even if VRSAVE == 0).  -- paulus
+        *
+        * On SMP we always save/restore altivec regs just to avoid the
+        * complexity of changing processors.
+        *  -- Cort
+        */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
-#endif /* CONFIG_SMP */
+#ifdef CONFIG_SPE
+       /*
+        * If the previous thread used spe in the last quantum
+        * (thus changing spe regs) then save them.
+        *
+        * On SMP we always save/restore spe regs just to avoid the
+        * complexity of changing processors.
+        */
+       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
+               giveup_spe(prev);
+#endif /* CONFIG_SPE */
 
-#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP)
+#else  /* CONFIG_SMP */
+#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       /* Avoid the trap.  On smp this this never happens since
+        * we don't set last_task_used_spe
+        */
+       if (new->thread.regs && last_task_used_spe == new)
+               new->thread.regs->msr |= MSR_SPE;
+#endif /* CONFIG_SPE */
 
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64    /* for now */
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
                set_dabr(new->thread.dabr);
                __get_cpu_var(current_dabr) = new->thread.dabr;
        }
 
        flush_tlb_pending();
+#endif
 
        new_thread = &new->thread;
        old_thread = &current->thread;
 
-       /* Collect purr utilization data per process and per processor
-        * wise purr is nothing but processor time base
+#ifdef CONFIG_PPC64
+       /*
+        * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
@@ -243,6 +317,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
+#endif
 
        local_irq_save(flags);
        last = _switch(old_thread, new_thread);
@@ -254,6 +329,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 static int instructions_to_print = 16;
 
+#ifdef CONFIG_PPC64
+#define BAD_PC(pc)     ((REGION_ID(pc) != KERNEL_REGION_ID) && \
+                        (REGION_ID(pc) != VMALLOC_REGION_ID))
+#else
+#define BAD_PC(pc)     ((pc) < KERNELBASE)
+#endif
+
 static void show_instructions(struct pt_regs *regs)
 {
        int i;
@@ -268,9 +350,7 @@ static void show_instructions(struct pt_regs *regs)
                if (!(i % 8))
                        printk("\n");
 
-               if (((REGION_ID(pc) != KERNEL_REGION_ID) &&
-                    (REGION_ID(pc) != VMALLOC_REGION_ID)) ||
-                    __get_user(instr, (unsigned int *)pc)) {
+               if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
                        printk("XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
@@ -285,50 +365,82 @@ static void show_instructions(struct pt_regs *regs)
        printk("\n");
 }
 
+static struct regbit {
+       unsigned long bit;
+       const char *name;
+} msr_bits[] = {
+       {MSR_EE,        "EE"},
+       {MSR_PR,        "PR"},
+       {MSR_FP,        "FP"},
+       {MSR_ME,        "ME"},
+       {MSR_IR,        "IR"},
+       {MSR_DR,        "DR"},
+       {0,             NULL}
+};
+
+static void printbits(unsigned long val, struct regbit *bits)
+{
+       const char *sep = "";
+
+       printk("<");
+       for (; bits->bit; ++bits)
+               if (val & bits->bit) {
+                       printk("%s%s", sep, bits->name);
+                       sep = ",";
+               }
+       printk(">");
+}
+
+#ifdef CONFIG_PPC64
+#define REG            "%016lX"
+#define REGS_PER_LINE  4
+#define LAST_VOLATILE  13
+#else
+#define REG            "%08lX"
+#define REGS_PER_LINE  8
+#define LAST_VOLATILE  12
+#endif
+
 void show_regs(struct pt_regs * regs)
 {
-       int i;
-       unsigned long trap;
+       int i, trap;
 
-       printk("NIP: %016lX XER: %08X LR: %016lX CTR: %016lX\n",
-              regs->nip, (unsigned int)regs->xer, regs->link, regs->ctr);
+       printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
+              regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), system_utsname.release);
-       printk("MSR: %016lx EE: %01x PR: %01x FP: %01x ME: %01x "
-              "IR/DR: %01x%01x CR: %08X\n",
-              regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
-              regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
-              regs->msr&MSR_IR ? 1 : 0,
-              regs->msr&MSR_DR ? 1 : 0,
-              (unsigned int)regs->ccr);
+       printk("MSR: "REG" ", regs->msr);
+       printbits(regs->msr, msr_bits);
+       printk("  CR: %08lX  XER: %08lX\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
-       printk("DAR: %016lx DSISR: %016lx\n", regs->dar, regs->dsisr);
-       printk("TASK: %p[%d] '%s' THREAD: %p",
+       if (trap == 0x300 || trap == 0x600)
+               printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+       printk("TASK = %p[%d] '%s' THREAD: %p",
               current, current->pid, current->comm, current->thread_info);
 
 #ifdef CONFIG_SMP
        printk(" CPU: %d", smp_processor_id());
 #endif /* CONFIG_SMP */
 
-       for (i = 0; i < 32; i++) {
-               if ((i % 4) == 0) {
+       for (i = 0;  i < 32;  i++) {
+               if ((i % REGS_PER_LINE) == 0)
                        printk("\n" KERN_INFO "GPR%02d: ", i);
-               }
-
-               printk("%016lX ", regs->gpr[i]);
-               if (i == 13 && !FULL_REGS(regs))
+               printk(REG " ", regs->gpr[i]);
+               if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
+#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best change of getting the
         * above info out without failing
         */
-       printk("NIP [%016lx] ", regs->nip);
+       printk("NIP ["REG"] ", regs->nip);
        print_symbol("%s\n", regs->nip);
-       printk("LR [%016lx] ", regs->link);
+       printk("LR ["REG"] ", regs->link);
        print_symbol("%s\n", regs->link);
-       show_stack(current, (unsigned long *)regs->gpr[1]);
+#endif
+       show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
 }
@@ -344,16 +456,22 @@ void exit_thread(void)
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
 #endif /* CONFIG_SMP */
 }
 
 void flush_thread(void)
 {
+#ifdef CONFIG_PPC64
        struct thread_info *t = current_thread_info();
 
-       kprobe_flush_task(current);
        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+#endif
+       kprobe_flush_task(current);
 
 #ifndef CONFIG_SMP
        if (last_task_used_math == current)
@@ -362,12 +480,18 @@ void flush_thread(void)
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC64    /* for now */
        if (current->thread.dabr) {
                current->thread.dabr = 0;
                set_dabr(0);
        }
+#endif
 }
 
 void
@@ -375,7 +499,6 @@ release_thread(struct task_struct *t)
 {
 }
 
-
 /*
  * This gets called before we allocate a new thread and copy
  * the current task into it.
@@ -384,36 +507,44 @@ void prepare_to_copy(struct task_struct *tsk)
 {
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
+       flush_spe_to_thread(current);
 }
 
 /*
  * Copy a thread..
  */
-int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
-           unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+               unsigned long unused, struct task_struct *p,
+               struct pt_regs *regs)
 {
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
 
+       CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
-               /* for kernel thread, set stackptr in new task */
+               /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
-               p->thread.regs = NULL;  /* no user register state */
+#ifdef CONFIG_PPC32
+               childregs->gpr[2] = (unsigned long) p;
+#else
                clear_ti_thread_flag(p->thread_info, TIF_32BIT);
+#endif
+               p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
-                       if (test_thread_flag(TIF_32BIT))
-                               childregs->gpr[2] = childregs->gpr[6];
-                       else
+#ifdef CONFIG_PPC64
+                       if (!test_thread_flag(TIF_32BIT))
                                childregs->gpr[13] = childregs->gpr[6];
+                       else
+#endif
+                               childregs->gpr[2] = childregs->gpr[6];
                }
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
@@ -431,6 +562,8 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
+
+#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_SLB)) {
                unsigned long sp_vsid = get_kernel_vsid(sp);
 
@@ -449,6 +582,10 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
         * function.
         */
        kregs->nip = *((unsigned long *)ret_from_fork);
+#else
+       kregs->nip = (unsigned long)ret_from_fork;
+       p->thread.last_syscall = -1;
+#endif
 
        return 0;
 }
@@ -456,30 +593,17 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 /*
  * Set up a thread for executing a new program
  */
-void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
+void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 {
-       unsigned long entry, toc, load_addr = regs->gpr[2];
+#ifdef CONFIG_PPC64
+       unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
+#endif
 
-       /* fdptr is a relocated pointer to the function descriptor for
-         * the elf _start routine.  The first entry in the function
-         * descriptor is the entry address of _start and the second
-         * entry is the TOC value we need to use.
-         */
        set_fs(USER_DS);
-       __get_user(entry, (unsigned long __user *)fdptr);
-       __get_user(toc, (unsigned long __user *)fdptr+1);
-
-       /* Check whether the e_entry function descriptor entries
-        * need to be relocated before we can use them.
-        */
-       if (load_addr != 0) {
-               entry += load_addr;
-               toc   += load_addr;
-       }
 
        /*
         * If we exec out of a kernel thread then thread.regs will not be
-        * set. Do it now.
+        * set.  Do it now.
         */
        if (!current->thread.regs) {
                unsigned long childregs = (unsigned long)current->thread_info +
@@ -488,36 +612,101 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
                current->thread.regs = (struct pt_regs *)childregs;
        }
 
-       regs->nip = entry;
+       memset(regs->gpr, 0, sizeof(regs->gpr));
+       regs->ctr = 0;
+       regs->link = 0;
+       regs->xer = 0;
+       regs->ccr = 0;
        regs->gpr[1] = sp;
-       regs->gpr[2] = toc;
-       regs->msr = MSR_USER64;
+
+#ifdef CONFIG_PPC32
+       regs->mq = 0;
+       regs->nip = start;
+       regs->msr = MSR_USER;
+#else
+       if (!test_thread_flag(TIF_32BIT)) {
+               unsigned long entry, toc;
+
+               /* start is a relocated pointer to the function descriptor for
+                * the elf _start routine.  The first entry in the function
+                * descriptor is the entry address of _start and the second
+                * entry is the TOC value we need to use.
+                */
+               __get_user(entry, (unsigned long __user *)start);
+               __get_user(toc, (unsigned long __user *)start+1);
+
+               /* Check whether the e_entry function descriptor entries
+                * need to be relocated before we can use them.
+                */
+               if (load_addr != 0) {
+                       entry += load_addr;
+                       toc   += load_addr;
+               }
+               regs->nip = entry;
+               regs->gpr[2] = toc;
+               regs->msr = MSR_USER64;
+       } else {
+               regs->nip = start;
+               regs->gpr[2] = 0;
+               regs->msr = MSR_USER32;
+       }
+#endif
+
 #ifndef CONFIG_SMP
        if (last_task_used_math == current)
-               last_task_used_math = 0;
+               last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+       if (last_task_used_altivec == current)
+               last_task_used_altivec = NULL;
+#endif
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
 #endif /* CONFIG_SMP */
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr = 0;
 #ifdef CONFIG_ALTIVEC
-#ifndef CONFIG_SMP
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = 0;
-#endif /* CONFIG_SMP */
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
-       current->thread.vscr.u[0] = 0;
-       current->thread.vscr.u[1] = 0;
-       current->thread.vscr.u[2] = 0;
+       memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+       memset(current->thread.evr, 0, sizeof(current->thread.evr));
+       current->thread.acc = 0;
+       current->thread.spefscr = 0;
+       current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
 }
-EXPORT_SYMBOL(start_thread);
+
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+               | PR_FP_EXC_RES | PR_FP_EXC_INV)
 
 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 {
        struct pt_regs *regs = tsk->thread.regs;
 
+       /* This is a bit hairy.  If we are an SPE enabled  processor
+        * (have embedded fp) we store the IEEE exception enable flags in
+        * fpexc_mode.  fpexc_mode is also used for setting FP exception
+        * mode (asyn, precise, disabled) for 'Classic' FP. */
+       if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+               tsk->thread.fpexc_mode = val &
+                       (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+               return 0;
+#else
+               return -EINVAL;
+#endif
+       }
+
+       /* on a CONFIG_SPE this does not hurt us.  The bits that
+        * __pack_fe01 use do not overlap with bits used for
+        * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
+        * on CONFIG_SPE implementations are reserved so writing to
+        * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
@@ -531,38 +720,41 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 {
        unsigned int val;
 
-       val = __unpack_fe01(tsk->thread.fpexc_mode);
+       if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+               val = tsk->thread.fpexc_mode;
+#else
+               return -EINVAL;
+#endif
+       else
+               val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
 }
 
-int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
-             unsigned long p4, unsigned long p5, unsigned long p6,
+#define TRUNC_PTR(x)   ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
+
+int sys_clone(unsigned long clone_flags, unsigned long usp,
+             int __user *parent_tidp, void __user *child_threadptr,
+             int __user *child_tidp, int p6,
              struct pt_regs *regs)
 {
-       unsigned long parent_tidptr = 0;
-       unsigned long child_tidptr = 0;
-
-       if (p2 == 0)
-               p2 = regs->gpr[1];      /* stack pointer for child */
-
-       if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_SETTID |
-                          CLONE_CHILD_CLEARTID)) {
-               parent_tidptr = p3;
-               child_tidptr = p5;
-               if (test_thread_flag(TIF_32BIT)) {
-                       parent_tidptr &= 0xffffffff;
-                       child_tidptr &= 0xffffffff;
-               }
+       CHECK_FULL_REGS(regs);
+       if (usp == 0)
+               usp = regs->gpr[1];     /* stack pointer for child */
+#ifdef CONFIG_PPC64
+       if (test_thread_flag(TIF_32BIT)) {
+               parent_tidp = TRUNC_PTR(parent_tidp);
+               child_tidp = TRUNC_PTR(child_tidp);
        }
-
-       return do_fork(clone_flags, p2, regs, 0,
-                   (int __user *)parent_tidptr, (int __user *)child_tidptr);
+#endif
+       return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
 }
 
 int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
 {
+       CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
 }
 
@@ -570,8 +762,9 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
 {
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0,
-                   NULL, NULL);
+       CHECK_FULL_REGS(regs);
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
+                       regs, 0, NULL, NULL);
 }
 
 int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -579,30 +772,27 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               struct pt_regs *regs)
 {
        int error;
-       char * filename;
-       
+       char *filename;
+
        filename = getname((char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
+       flush_spe_to_thread(current);
        error = do_execve(filename, (char __user * __user *) a1,
-                                   (char __user * __user *) a2, regs);
-  
+                         (char __user * __user *) a2, regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
-
 out:
        return error;
 }
 
-static int kstack_depth_to_print = 64;
-
 static int validate_sp(unsigned long sp, struct task_struct *p,
                       unsigned long nbytes)
 {
@@ -627,6 +817,20 @@ static int validate_sp(unsigned long sp, struct task_struct *p,
        return 0;
 }
 
+/*
+ * Stack-walking parameters used by get_wchan()/show_stack():
+ * minimum valid frame size, index of the LR save slot in a frame,
+ * size of an exception (interrupt) frame, and the marker word that
+ * identifies one, together with its word offset in the frame.
+ * REGS_MARKER is ASCII "regshere" (64-bit) / "regs" (32-bit).
+ */
+#ifdef CONFIG_PPC64
+#define MIN_STACK_FRAME        112     /* same as STACK_FRAME_OVERHEAD, in fact */
+#define FRAME_LR_SAVE  2
+#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
+#define REGS_MARKER    0x7265677368657265ul
+#define FRAME_MARKER   12
+#else
+#define MIN_STACK_FRAME        16
+#define FRAME_LR_SAVE  1
+#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
+#define REGS_MARKER    0x72656773ul
+#define FRAME_MARKER   2
+#endif
+
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long ip, sp;
@@ -636,15 +840,15 @@ unsigned long get_wchan(struct task_struct *p)
                return 0;
 
        sp = p->thread.ksp;
-       if (!validate_sp(sp, p, 112))
+       if (!validate_sp(sp, p, MIN_STACK_FRAME))
                return 0;
 
        do {
                sp = *(unsigned long *)sp;
-               if (!validate_sp(sp, p, 112))
+               if (!validate_sp(sp, p, MIN_STACK_FRAME))
                        return 0;
                if (count > 0) {
-                       ip = *(unsigned long *)(sp + 16);
+                       ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
@@ -653,33 +857,35 @@ unsigned long get_wchan(struct task_struct *p)
 }
 EXPORT_SYMBOL(get_wchan);
 
-void show_stack(struct task_struct *p, unsigned long *_sp)
+static int kstack_depth_to_print = 64;
+
+void show_stack(struct task_struct *tsk, unsigned long *stack)
 {
-       unsigned long ip, newsp, lr;
+       unsigned long sp, ip, lr, newsp;
        int count = 0;
-       unsigned long sp = (unsigned long)_sp;
        int firstframe = 1;
 
+       sp = (unsigned long) stack;
+       if (tsk == NULL)
+               tsk = current;
        if (sp == 0) {
-               if (p) {
-                       sp = p->thread.ksp;
-               } else {
-                       sp = __get_SP();
-                       p = current;
-               }
+               if (tsk == current)
+                       asm("mr %0,1" : "=r" (sp));
+               else
+                       sp = tsk->thread.ksp;
        }
 
        lr = 0;
        printk("Call Trace:\n");
        do {
-               if (!validate_sp(sp, p, 112))
+               if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
                        return;
 
-               _sp = (unsigned long *) sp;
-               newsp = _sp[0];
-               ip = _sp[2];
+               stack = (unsigned long *) sp;
+               newsp = stack[0];
+               ip = stack[FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
-                       printk("[%016lx] [%016lx] ", sp, ip);
+                       printk("["REG"] ["REG"] ", sp, ip);
                        print_symbol("%s", ip);
                        if (firstframe)
                                printk(" (unreliable)");
@@ -691,8 +897,8 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
-               if (validate_sp(sp, p, sizeof(struct pt_regs) + 400)
-                   && _sp[12] == 0x7265677368657265ul) {
+               if (validate_sp(sp, tsk, INT_FRAME_SIZE)
+                   && stack[FRAME_MARKER] == REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        printk("--- Exception: %lx", regs->trap);
@@ -708,6 +914,6 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
 
 void dump_stack(void)
 {
-       show_stack(current, (unsigned long *)__get_SP());
+       /* NULL stack makes show_stack() read the current r1 itself. */
+       show_stack(current, NULL);
 }
 EXPORT_SYMBOL(dump_stack);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
new file mode 100644 (file)
index 0000000..c8d2884
--- /dev/null
@@ -0,0 +1,2125 @@
+/*
+ * Procedures for creating, accessing and interpreting the device tree.
+ *
+ * Paul Mackerras      August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ * 
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com 
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <stdarg.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/lmb.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/pci.h>
+#include <asm/iommu.h>
+#include <asm/btext.h>
+#include <asm/sections.h>
+#include <asm/machdep.h>
+#include <asm/pSeries_reconfig.h>
+#include <asm/pci-bridge.h>
+#ifdef CONFIG_PPC64
+#include <asm/systemcfg.h>
+#endif
+
+#ifdef DEBUG
+#define DBG(fmt...) printk(KERN_ERR fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/* Layout of one entry in a PCI node's "assigned-addresses" property. */
+struct pci_reg_property {
+       struct pci_address addr;
+       u32 size_hi;
+       u32 size_lo;
+};
+
+/* Layout of one entry in an ISA node's "reg" property. */
+struct isa_reg_property {
+       u32 space;
+       u32 address;
+       u32 size;
+};
+
+
+typedef int interpret_func(struct device_node *, unsigned long *,
+                          int, int, int);
+
+extern struct rtas_t rtas;
+extern struct lmb lmb;
+extern unsigned long klimit;
+
+static unsigned long memory_limit;
+
+static int __initdata dt_root_addr_cells;
+static int __initdata dt_root_size_cells;
+
+#ifdef CONFIG_PPC64
+static int __initdata iommu_is_off;
+int __initdata iommu_force_on;
+extern unsigned long tce_alloc_start, tce_alloc_end;
+#endif
+
+typedef u32 cell_t;
+
+#if 0
+static struct boot_param_header *initial_boot_params __initdata;
+#else
+struct boot_param_header *initial_boot_params;
+#endif
+
+static struct device_node *allnodes = NULL;
+
+/* use when traversing tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
+ */
+static DEFINE_RWLOCK(devtree_lock);
+
+/* export that to outside world */
+struct device_node *of_chosen;
+
+struct device_node *dflt_interrupt_controller;
+int num_interrupt_controllers;
+
+u32 rtas_data;
+u32 rtas_entry;
+
+/*
+ * Wrapper for allocating memory for various data that needs to be
+ * attached to device nodes as they are processed at boot or when
+ * added to the device tree later (e.g. DLPAR).  At boot there is
+ * already a region reserved so we just increment *mem_start by size;
+ * otherwise we call kmalloc.
+ *
+ * @size:      number of bytes to allocate
+ * @mem_start: cursor into the boot-time reserved region, or NULL to
+ *             fall back to kmalloc(GFP_KERNEL)
+ */
+static void * prom_alloc(unsigned long size, unsigned long *mem_start)
+{
+       unsigned long tmp;
+
+       if (!mem_start)
+               return kmalloc(size, GFP_KERNEL);
+
+       /* Bump allocator: hand out the current cursor and advance it. */
+       tmp = *mem_start;
+       *mem_start += size;
+       return (void *)tmp;
+}
+
+/*
+ * Find the device_node with a given phandle: linear scan of the
+ * global allnodes list.  Returns NULL if no node carries @ph.
+ */
+static struct device_node * find_phandle(phandle ph)
+{
+       struct device_node *np;
+
+       for (np = allnodes; np != 0; np = np->allnext)
+               if (np->linux_phandle == ph)
+                       return np;
+       return NULL;
+}
+
+/*
+ * Find the interrupt parent of a node: the node named by its
+ * "interrupt-parent" property, or the plain tree parent when that
+ * property is absent.  May return NULL if no parent can be resolved.
+ */
+static struct device_node * __devinit intr_parent(struct device_node *p)
+{
+       phandle *parp;
+
+       parp = (phandle *) get_property(p, "interrupt-parent", NULL);
+       if (parp == NULL)
+               return p->parent;
+       p = find_phandle(*parp);
+       if (p != NULL)
+               return p;
+       /*
+        * On a powermac booted with BootX, we don't get to know the
+        * phandles for any nodes, so find_phandle will return NULL.
+        * Fortunately these machines only have one interrupt controller
+        * so there isn't in fact any ambiguity.  -- paulus
+        */
+       if (num_interrupt_controllers == 1)
+               p = dflt_interrupt_controller;
+       return p;
+}
+
+/*
+ * Find out the size of each entry of the interrupts property for a
+ * node by asking its interrupt parents for "#interrupt-cells".
+ * Falls back to 1 (warning when a controller/nexus node fails to
+ * declare its cell count) if no ancestor supplies the property.
+ */
+int __devinit prom_n_intr_cells(struct device_node *np)
+{
+       struct device_node *p;
+       unsigned int *icp;
+
+       for (p = np; (p = intr_parent(p)) != NULL; ) {
+               icp = (unsigned int *)
+                       get_property(p, "#interrupt-cells", NULL);
+               if (icp != NULL)
+                       return *icp;
+               /* A node with interrupt-controller or interrupt-map
+                * ought to have told us its cell count; complain. */
+               if (get_property(p, "interrupt-controller", NULL) != NULL
+                   || get_property(p, "interrupt-map", NULL) != NULL) {
+                       printk("oops, node %s doesn't have #interrupt-cells\n",
+                              p->full_name);
+                       return 1;
+               }
+       }
+#ifdef DEBUG_IRQ
+       printk("prom_n_intr_cells failed for %s\n", np->full_name);
+#endif
+       return 1;
+}
+
+/*
+ * Map an interrupt from a device up to the platform interrupt
+ * descriptor: walk the chain of interrupt parents, translating the
+ * (reg, interrupt-specifier) pair through each node's
+ * "interrupt-map"/"interrupt-map-mask" until a node marked
+ * "interrupt-controller" is reached.
+ *
+ * On success *irq points at the final specifier cells, *ictrler at the
+ * controlling node, and the number of interrupt cells is returned.
+ * Returns 0 on any failure.
+ */
+static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
+                                  struct device_node *np, unsigned int *ints,
+                                  int nintrc)
+{
+       struct device_node *p, *ipar;
+       unsigned int *imap, *imask, *ip;
+       int i, imaplen, match;
+       int newintrc = 0, newaddrc = 0;
+       unsigned int *reg;
+       int naddrc;
+
+       reg = (unsigned int *) get_property(np, "reg", NULL);
+       naddrc = prom_n_addr_cells(np);
+       p = intr_parent(np);
+       while (p != NULL) {
+               if (get_property(p, "interrupt-controller", NULL) != NULL)
+                       /* this node is an interrupt controller, stop here */
+                       break;
+               imap = (unsigned int *)
+                       get_property(p, "interrupt-map", &imaplen);
+               if (imap == NULL) {
+                       /* transparent node: keep climbing */
+                       p = intr_parent(p);
+                       continue;
+               }
+               imask = (unsigned int *)
+                       get_property(p, "interrupt-map-mask", NULL);
+               if (imask == NULL) {
+                       printk("oops, %s has interrupt-map but no mask\n",
+                              p->full_name);
+                       return 0;
+               }
+               imaplen /= sizeof(unsigned int);
+               match = 0;
+               ipar = NULL;
+               while (imaplen > 0 && !match) {
+                       /* check the child-interrupt field */
+                       match = 1;
+                       for (i = 0; i < naddrc && match; ++i)
+                               match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
+                       for (; i < naddrc + nintrc && match; ++i)
+                               match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
+                       imap += naddrc + nintrc;
+                       imaplen -= naddrc + nintrc;
+                       /* grab the interrupt parent */
+                       ipar = find_phandle((phandle) *imap++);
+                       --imaplen;
+                       if (ipar == NULL && num_interrupt_controllers == 1)
+                               /* cope with BootX not giving us phandles */
+                               ipar = dflt_interrupt_controller;
+                       if (ipar == NULL) {
+                               printk("oops, no int parent %x in map of %s\n",
+                                      imap[-1], p->full_name);
+                               return 0;
+                       }
+                       /* find the parent's # addr and intr cells */
+                       ip = (unsigned int *)
+                               get_property(ipar, "#interrupt-cells", NULL);
+                       if (ip == NULL) {
+                               printk("oops, no #interrupt-cells on %s\n",
+                                      ipar->full_name);
+                               return 0;
+                       }
+                       newintrc = *ip;
+                       ip = (unsigned int *)
+                               get_property(ipar, "#address-cells", NULL);
+                       newaddrc = (ip == NULL)? 0: *ip;
+                       /* skip the parent unit address + specifier cells */
+                       imap += newaddrc + newintrc;
+                       imaplen -= newaddrc + newintrc;
+               }
+               if (imaplen < 0) {
+                       printk("oops, error decoding int-map on %s, len=%d\n",
+                              p->full_name, imaplen);
+                       return 0;
+               }
+               if (!match) {
+#ifdef DEBUG_IRQ
+                       printk("oops, no match in %s int-map for %s\n",
+                              p->full_name, np->full_name);
+#endif
+                       return 0;
+               }
+               /* continue one level up using the translated specifier,
+                * which sits just before the point imap has advanced to */
+               p = ipar;
+               naddrc = newaddrc;
+               nintrc = newintrc;
+               ints = imap - nintrc;
+               reg = ints - naddrc;
+       }
+       if (p == NULL) {
+#ifdef DEBUG_IRQ
+               printk("hmmm, int tree for %s doesn't have ctrler\n",
+                      np->full_name);
+#endif
+               return 0;
+       }
+       *irq = ints;
+       *ictrler = p;
+       return nintrc;
+}
+
+/*
+ * Parse a node's "interrupts" property into np->intrs[] via
+ * map_interrupt().  With measure_only set, only the prom_alloc()
+ * bookkeeping is done so the caller can size the boot-time
+ * allocation.  Returns 0 on success or -ENOMEM.
+ */
+static int __devinit finish_node_interrupts(struct device_node *np,
+                                           unsigned long *mem_start,
+                                           int measure_only)
+{
+       unsigned int *ints;
+       int intlen, intrcells, intrcount;
+       int i, j, n;
+       unsigned int *irq, virq;
+       struct device_node *ic;
+
+       ints = (unsigned int *) get_property(np, "interrupts", &intlen);
+       if (ints == NULL)
+               return 0;
+       intrcells = prom_n_intr_cells(np);
+       intlen /= intrcells * sizeof(unsigned int);
+
+       np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
+       if (!np->intrs)
+               return -ENOMEM;
+
+       if (measure_only)
+               return 0;
+
+       intrcount = 0;
+       for (i = 0; i < intlen; ++i, ints += intrcells) {
+               n = map_interrupt(&irq, &ic, np, ints, intrcells);
+               if (n <= 0)
+                       continue;
+
+               /* don't map IRQ numbers under a cascaded 8259 controller */
+               if (ic && device_is_compatible(ic, "chrp,iic")) {
+                       np->intrs[intrcount].line = irq[0];
+               } else {
+#ifdef CONFIG_PPC64
+                       virq = virt_irq_create_mapping(irq[0]);
+                       if (virq == NO_IRQ) {
+                               printk(KERN_CRIT "Could not allocate interrupt"
+                                      " number for %s\n", np->full_name);
+                               continue;
+                       }
+                       virq = irq_offset_up(virq);
+#else
+                       virq = irq[0];
+#endif
+                       np->intrs[intrcount].line = virq;
+               }
+
+#ifdef CONFIG_PPC64
+               /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
+               if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
+                       char *name = get_property(ic->parent, "name", NULL);
+                       if (name && !strcmp(name, "u3"))
+                               np->intrs[intrcount].line += 128;
+                       else if (!(name && !strcmp(name, "mac-io")))
+                               /* ignore other cascaded controllers, such as
+                                  the k2-sata-root */
+                               break;
+               }
+#endif
+               /* sense defaults to 1; second specifier cell overrides it */
+               np->intrs[intrcount].sense = 1;
+               if (n > 1)
+                       np->intrs[intrcount].sense = irq[1];
+               if (n > 2) {
+                       printk("hmmm, got %d intr cells for %s:", n,
+                              np->full_name);
+                       for (j = 0; j < n; ++j)
+                               printk(" %d", irq[j]);
+                       printk("\n");
+               }
+               ++intrcount;
+       }
+       np->n_intrs = intrcount;
+
+       return 0;
+}
+
+/*
+ * Translate a PCI node's "assigned-addresses" property into the
+ * np->addrs[]/np->n_addrs address_range array.  With measure_only
+ * set, only the prom_alloc() accounting is performed.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int __devinit interpret_pci_props(struct device_node *np,
+                                        unsigned long *mem_start,
+                                        int naddrc, int nsizec,
+                                        int measure_only)
+{
+       struct address_range *adr;
+       struct pci_reg_property *pci_addrs;
+       int i, l, n_addrs;
+
+       pci_addrs = (struct pci_reg_property *)
+               get_property(np, "assigned-addresses", &l);
+       if (!pci_addrs)
+               return 0;
+
+       n_addrs = l / sizeof(*pci_addrs);
+
+       adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
+       if (!adr)
+               return -ENOMEM;
+
+       if (measure_only)
+               return 0;
+
+       np->addrs = adr;
+       np->n_addrs = n_addrs;
+
+       for (i = 0; i < n_addrs; i++) {
+               adr[i].space = pci_addrs[i].addr.a_hi;
+               /* 64-bit address from the mid/lo cells; size from size_lo */
+               adr[i].address = pci_addrs[i].addr.a_lo |
+                       ((u64)pci_addrs[i].addr.a_mid << 32);
+               adr[i].size = pci_addrs[i].size_lo;
+       }
+
+       return 0;
+}
+
+/*
+ * Translate "reg" for a node under a dbdma bus: each 32-bit entry is
+ * rebased onto the first enclosing dbdma ancestor's base address.
+ * Allocation comes straight off *mem_start (no prom_alloc here).
+ */
+static int __init interpret_dbdma_props(struct device_node *np,
+                                       unsigned long *mem_start,
+                                       int naddrc, int nsizec,
+                                       int measure_only)
+{
+       struct reg_property32 *rp;
+       struct address_range *adr;
+       unsigned long base_address;
+       int i, l;
+       struct device_node *db;
+
+       base_address = 0;
+       if (!measure_only) {
+               for (db = np->parent; db != NULL; db = db->parent) {
+                       if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
+                               base_address = db->addrs[0].address;
+                               break;
+                       }
+               }
+       }
+
+       rp = (struct reg_property32 *) get_property(np, "reg", &l);
+       if (rp != 0 && l >= sizeof(struct reg_property32)) {
+               i = 0;
+               adr = (struct address_range *) (*mem_start);
+               while ((l -= sizeof(struct reg_property32)) >= 0) {
+                       if (!measure_only) {
+                               adr[i].space = 2;
+                               adr[i].address = rp[i].address + base_address;
+                               adr[i].size = rp[i].size;
+                       }
+                       ++i;
+               }
+               np->addrs = adr;
+               np->n_addrs = i;
+               (*mem_start) += i * sizeof(struct address_range);
+       }
+
+       return 0;
+}
+
+/*
+ * Translate "reg" for a node under a mac-io bus: same scheme as
+ * interpret_dbdma_props() but rebased onto the nearest mac-io
+ * ancestor's base address.
+ */
+static int __init interpret_macio_props(struct device_node *np,
+                                       unsigned long *mem_start,
+                                       int naddrc, int nsizec,
+                                       int measure_only)
+{
+       struct reg_property32 *rp;
+       struct address_range *adr;
+       unsigned long base_address;
+       int i, l;
+       struct device_node *db;
+
+       base_address = 0;
+       if (!measure_only) {
+               for (db = np->parent; db != NULL; db = db->parent) {
+                       if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
+                               base_address = db->addrs[0].address;
+                               break;
+                       }
+               }
+       }
+
+       rp = (struct reg_property32 *) get_property(np, "reg", &l);
+       if (rp != 0 && l >= sizeof(struct reg_property32)) {
+               i = 0;
+               adr = (struct address_range *) (*mem_start);
+               while ((l -= sizeof(struct reg_property32)) >= 0) {
+                       if (!measure_only) {
+                               adr[i].space = 2;
+                               adr[i].address = rp[i].address + base_address;
+                               adr[i].size = rp[i].size;
+                       }
+                       ++i;
+               }
+               np->addrs = adr;
+               np->n_addrs = i;
+               (*mem_start) += i * sizeof(struct address_range);
+       }
+
+       return 0;
+}
+
+/*
+ * Translate "reg" for a node on an ISA bus; each entry carries its
+ * own space/address/size triple, so no rebasing is needed.
+ */
+static int __init interpret_isa_props(struct device_node *np,
+                                     unsigned long *mem_start,
+                                     int naddrc, int nsizec,
+                                     int measure_only)
+{
+       struct isa_reg_property *rp;
+       struct address_range *adr;
+       int i, l;
+
+       rp = (struct isa_reg_property *) get_property(np, "reg", &l);
+       if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
+               i = 0;
+               adr = (struct address_range *) (*mem_start);
+               while ((l -= sizeof(struct isa_reg_property)) >= 0) {
+                       if (!measure_only) {
+                               adr[i].space = rp[i].space;
+                               adr[i].address = rp[i].address;
+                               adr[i].size = rp[i].size;
+                       }
+                       ++i;
+               }
+               np->addrs = adr;
+               np->n_addrs = i;
+               (*mem_start) += i * sizeof(struct address_range);
+       }
+
+       return 0;
+}
+
+/*
+ * Translate "reg" for a direct child of the root (or root-like bus),
+ * using the inherited #address-cells/#size-cells counts to pick the
+ * last (least-significant) address and size cell of each entry.
+ */
+static int __init interpret_root_props(struct device_node *np,
+                                      unsigned long *mem_start,
+                                      int naddrc, int nsizec,
+                                      int measure_only)
+{
+       struct address_range *adr;
+       int i, l;
+       unsigned int *rp;
+       int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
+
+       rp = (unsigned int *) get_property(np, "reg", &l);
+       if (rp != 0 && l >= rpsize) {
+               i = 0;
+               adr = (struct address_range *) (*mem_start);
+               while ((l -= rpsize) >= 0) {
+                       if (!measure_only) {
+                               adr[i].space = 0;
+                               adr[i].address = rp[naddrc - 1];
+                               adr[i].size = rp[naddrc + nsizec - 1];
+                       }
+                       ++i;
+                       rp += naddrc + nsizec;
+               }
+               np->addrs = adr;
+               np->n_addrs = i;
+               (*mem_start) += i * sizeof(struct address_range);
+       }
+
+       return 0;
+}
+
+/*
+ * Fill in the addrs/intrs arrays for one node and recurse over its
+ * children, selecting the "reg"-interpretation routine to hand down
+ * from this node's name/type (i.e. from the bus the children sit on).
+ * measure_only is passed through so the same walk can size the
+ * allocations first.  Returns 0 or the first error from a helper.
+ */
+static int __devinit finish_node(struct device_node *np,
+                                unsigned long *mem_start,
+                                interpret_func *ifunc,
+                                int naddrc, int nsizec,
+                                int measure_only)
+{
+       struct device_node *child;
+       int *ip, rc = 0;
+
+       /* get the device addresses and interrupts */
+       if (ifunc != NULL)
+               rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
+       if (rc)
+               goto out;
+
+       rc = finish_node_interrupts(np, mem_start, measure_only);
+       if (rc)
+               goto out;
+
+       /* Look for #address-cells and #size-cells properties. */
+       ip = (int *) get_property(np, "#address-cells", NULL);
+       if (ip != NULL)
+               naddrc = *ip;
+       ip = (int *) get_property(np, "#size-cells", NULL);
+       if (ip != NULL)
+               nsizec = *ip;
+
+       /* choose the interpreter the children will use */
+       if (!strcmp(np->name, "device-tree") || np->parent == NULL)
+               ifunc = interpret_root_props;
+       else if (np->type == 0)
+               ifunc = NULL;
+       else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
+               ifunc = interpret_pci_props;
+       else if (!strcmp(np->type, "dbdma"))
+               ifunc = interpret_dbdma_props;
+       else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
+               ifunc = interpret_macio_props;
+       else if (!strcmp(np->type, "isa"))
+               ifunc = interpret_isa_props;
+       else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
+               ifunc = interpret_root_props;
+       else if (!((ifunc == interpret_dbdma_props
+                   || ifunc == interpret_macio_props)
+                  && (!strcmp(np->type, "escc")
+                      || !strcmp(np->type, "media-bay"))))
+               ifunc = NULL;
+
+       for (child = np->child; child != NULL; child = child->sibling) {
+               rc = finish_node(child, mem_start, ifunc,
+                                naddrc, nsizec, measure_only);
+               if (rc)
+                       goto out;
+       }
+out:
+       return rc;
+}
+
+/*
+ * Count the interrupt controllers in the tree (setting
+ * num_interrupt_controllers) and remember the first one found as
+ * dflt_interrupt_controller, the fallback used when phandles are
+ * unavailable (e.g. BootX boots).
+ */
+static void __init scan_interrupt_controllers(void)
+{
+       struct device_node *np;
+       int n = 0;
+       char *name, *ic;
+       int iclen;
+
+       for (np = allnodes; np != NULL; np = np->allnext) {
+               ic = get_property(np, "interrupt-controller", &iclen);
+               name = get_property(np, "name", NULL);
+               /* checking iclen makes sure we don't get a false
+                  match on /chosen.interrupt_controller */
+               if ((name != NULL
+                    && strcmp(name, "interrupt-controller") == 0)
+                   || (ic != NULL && iclen == 0
+                       && strcmp(name, "AppleKiwi"))) {
+                       if (n == 0)
+                               dflt_interrupt_controller = np;
+                       ++n;
+               }
+       }
+       num_interrupt_controllers = n;
+}
+
+/**
+ * finish_device_tree is called once things are running normally
+ * (i.e. with text and data mapped to the address they were linked at).
+ * It traverses the device tree and fills in some of the additional
+ * fields in each node like {n_}addrs and {n_}intrs; the virt interrupt
+ * mapping is also initialized at this point.
+ */
+void __init finish_device_tree(void)
+{
+       unsigned long start, end, size = 0;
+
+       DBG(" -> finish_device_tree\n");
+
+#ifdef CONFIG_PPC64
+       /* Initialize virtual IRQ map */
+       virt_irq_init();
+#endif
+       scan_interrupt_controllers();
+
+       /*
+        * Finish device-tree (pre-parsing some properties etc...)
+        * We do this in 2 passes. One with "measure_only" set, which
+        * will only measure the amount of memory needed, then we can
+        * allocate that memory, and call finish_node again. However,
+        * we must be careful as most routines will fail nowadays when
+        * prom_alloc() returns 0, so we must make sure our first pass
+        * doesn't start at 0. We pre-initialize size to 16 for that
+        * reason and then remove those additional 16 bytes
+        */
+       size = 16;
+       finish_node(allnodes, &size, NULL, 0, 0, 1);    /* pass 1: measure */
+       size -= 16;
+       end = start = (unsigned long) __va(lmb_alloc(size, 128));
+       finish_node(allnodes, &end, NULL, 0, 0, 0);     /* pass 2: fill in */
+       BUG_ON(end != start + size);
+
+       DBG(" <- finish_device_tree\n");
+}
+
+/* Return a pointer into the flat tree's strings block at @offset. */
+static inline char *find_flat_dt_string(u32 offset)
+{
+       return ((char *)initial_boot_params) +
+               initial_boot_params->off_dt_strings + offset;
+}
+
+/**
+ * This function is used to scan the flattened device-tree, it is
+ * used to extract the memory informations at boot before we can
+ * unflatten the tree.  The callback @it is invoked for each node
+ * with its structure offset, last path component and depth; a
+ * non-zero return stops the walk and is passed back to the caller.
+ */
+static int __init scan_flat_dt(int (*it)(unsigned long node,
+                                        const char *uname, int depth,
+                                        void *data),
+                              void *data)
+{
+       unsigned long p = ((unsigned long)initial_boot_params) +
+               initial_boot_params->off_dt_struct;
+       int rc = 0;
+       int depth = -1;
+
+       do {
+               u32 tag = *((u32 *)p);
+               char *pathp;
+               
+               p += 4;
+               if (tag == OF_DT_END_NODE) {
+                       depth --;
+                       continue;
+               }
+               if (tag == OF_DT_NOP)
+                       continue;
+               if (tag == OF_DT_END)
+                       break;
+               if (tag == OF_DT_PROP) {
+                       /* skip over the property: its length word, name
+                        * offset word, then the (aligned) value bytes */
+                       u32 sz = *((u32 *)p);
+                       p += 8;
+                       if (initial_boot_params->version < 0x10)
+                               p = _ALIGN(p, sz >= 8 ? 8 : 4);
+                       p += sz;
+                       p = _ALIGN(p, 4);
+                       continue;
+               }
+               if (tag != OF_DT_BEGIN_NODE) {
+                       printk(KERN_WARNING "Invalid tag %x scanning flattened"
+                              " device tree !\n", tag);
+                       return -EINVAL;
+               }
+               depth++;
+               pathp = (char *)p;
+               p = _ALIGN(p + strlen(pathp) + 1, 4);
+               /* strip the path down to its last component for @uname */
+               if ((*pathp) == '/') {
+                       char *lp, *np;
+                       for (lp = NULL, np = pathp; *np; np++)
+                               if ((*np) == '/')
+                                       lp = np+1;
+                       if (lp != NULL)
+                               pathp = lp;
+               }
+               rc = it(p, pathp, depth, data);
+               if (rc != 0)
+                       break;
+       } while(1);
+
+       return rc;
+}
+
+/**
+ * This  function can be used within scan_flattened_dt callback to get
+ * access to properties: walk the property records starting at @node
+ * looking for @name, returning a pointer to the value (with its
+ * length stored in *size when non-NULL), or NULL if the node has no
+ * such property.
+ */
+static void* __init get_flat_dt_prop(unsigned long node, const char *name,
+                                    unsigned long *size)
+{
+       unsigned long p = node;
+
+       do {
+               u32 tag = *((u32 *)p);
+               u32 sz, noff;
+               const char *nstr;
+
+               p += 4;
+               if (tag == OF_DT_NOP)
+                       continue;
+               /* properties come first in a node; anything else ends it */
+               if (tag != OF_DT_PROP)
+                       return NULL;
+
+               sz = *((u32 *)p);
+               noff = *((u32 *)(p + 4));
+               p += 8;
+               if (initial_boot_params->version < 0x10)
+                       p = _ALIGN(p, sz >= 8 ? 8 : 4);
+
+               nstr = find_flat_dt_string(noff);
+               if (nstr == NULL) {
+                       printk(KERN_WARNING "Can't find property index"
+                              " name !\n");
+                       return NULL;
+               }
+               if (strcmp(name, nstr) == 0) {
+                       if (size)
+                               *size = sz;
+                       return (void *)p;
+               }
+               p += sz;
+               p = _ALIGN(p, 4);
+       } while(1);
+}
+
+/* Bump-allocate @size bytes from the linear buffer tracked by *@mem,
+ * first rounding *@mem up to @align.  Returns the start of the chunk.
+ */
+static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+                                      unsigned long align)
+{
+       unsigned long chunk = _ALIGN(*mem, align);
+
+       *mem = chunk + size;
+       return (void *)chunk;
+}
+
+/* Unflatten a single node of the flat device-tree, recursing into its
+ * children.  Called twice: first with allnextpp == NULL purely to size
+ * the memory needed, then with a real allocation to actually build the
+ * struct device_node instances.  Returns the updated bump pointer.
+ */
+static unsigned long __init unflatten_dt_node(unsigned long mem,
+                                             unsigned long *p,
+                                             struct device_node *dad,
+                                             struct device_node ***allnextpp,
+                                             unsigned long fpsize)
+{
+       struct device_node *np;
+       struct property *pp, **prev_pp = NULL;
+       char *pathp;
+       u32 tag;
+       unsigned int l, allocl;
+       int has_name = 0;
+       int new_format = 0;
+
+       tag = *((u32 *)(*p));
+       if (tag != OF_DT_BEGIN_NODE) {
+               printk("Weird tag at start of node: %x\n", tag);
+               return mem;
+       }
+       *p += 4;
+       pathp = (char *)*p;
+       l = allocl = strlen(pathp) + 1;
+       *p = _ALIGN(*p + l, 4);
+
+       /* version 0x10 has a more compact unit name here instead of the full
+        * path. we accumulate the full path size using "fpsize", we'll rebuild
+        * it later. We detect this because the first character of the name is
+        * not '/'.
+        */
+       if ((*pathp) != '/') {
+               new_format = 1;
+               if (fpsize == 0) {
+                       /* root node: special case. fpsize accounts for path
+                        * plus terminating zero. root node only has '/', so
+                        * fpsize should be 2, but we want to avoid the first
+                        * level nodes to have two '/' so we use fpsize 1 here
+                        */
+                       fpsize = 1;
+                       allocl = 2;
+               } else {
+                       /* account for '/' and path size minus terminal 0
+                        * already in 'l'
+                        */
+                       fpsize += l;
+                       allocl = fpsize;
+               }
+       }
+
+
+       /* node struct and its full_name are carved out of the same chunk */
+       np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
+                               __alignof__(struct device_node));
+       if (allnextpp) {
+               memset(np, 0, sizeof(*np));
+               np->full_name = ((char*)np) + sizeof(struct device_node);
+               if (new_format) {
+                       char *p = np->full_name;
+                       /* rebuild full path for new format */
+                       if (dad && dad->parent) {
+                               strcpy(p, dad->full_name);
+#ifdef DEBUG
+                               if ((strlen(p) + l + 1) != allocl) {
+                                       DBG("%s: p: %d, l: %d, a: %d\n",
+                                           pathp, strlen(p), l, allocl);
+                               }
+#endif
+                               p += strlen(p);
+                       }
+                       *(p++) = '/';
+                       memcpy(p, pathp, l);
+               } else
+                       memcpy(np->full_name, pathp, l);
+               prev_pp = &np->properties;
+               **allnextpp = np;
+               *allnextpp = &np->allnext;
+               if (dad != NULL) {
+                       np->parent = dad;
+                       /* we temporarily use the next field as `last_child'*/
+                       if (dad->next == 0)
+                               dad->child = np;
+                       else
+                               dad->next->sibling = np;
+                       dad->next = np;
+               }
+               kref_init(&np->kref);
+       }
+       /* walk this node's properties */
+       while(1) {
+               u32 sz, noff;
+               char *pname;
+
+               tag = *((u32 *)(*p));
+               if (tag == OF_DT_NOP) {
+                       *p += 4;
+                       continue;
+               }
+               if (tag != OF_DT_PROP)
+                       break;
+               *p += 4;
+               sz = *((u32 *)(*p));
+               noff = *((u32 *)((*p) + 4));
+               *p += 8;
+               if (initial_boot_params->version < 0x10)
+                       *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
+
+               pname = find_flat_dt_string(noff);
+               if (pname == NULL) {
+                       printk("Can't find property name in list !\n");
+                       break;
+               }
+               if (strcmp(pname, "name") == 0)
+                       has_name = 1;
+               l = strlen(pname) + 1;
+               pp = unflatten_dt_alloc(&mem, sizeof(struct property),
+                                       __alignof__(struct property));
+               if (allnextpp) {
+                       /* ibm,phandle takes precedence over linux,phandle */
+                       if (strcmp(pname, "linux,phandle") == 0) {
+                               np->node = *((u32 *)*p);
+                               if (np->linux_phandle == 0)
+                                       np->linux_phandle = np->node;
+                       }
+                       if (strcmp(pname, "ibm,phandle") == 0)
+                               np->linux_phandle = *((u32 *)*p);
+                       pp->name = pname;
+                       pp->length = sz;
+                       pp->value = (void *)*p;
+                       *prev_pp = pp;
+                       prev_pp = &pp->next;
+               }
+               *p = _ALIGN((*p) + sz, 4);
+       }
+       /* with version 0x10 we may not have the name property, recreate
+        * it here from the unit name if absent
+        */
+       if (!has_name) {
+               char *p = pathp, *ps = pathp, *pa = NULL;
+               int sz;
+
+               /* ps -> start of last path component, pa -> '@' unit address */
+               while (*p) {
+                       if ((*p) == '@')
+                               pa = p;
+                       if ((*p) == '/')
+                               ps = p + 1;
+                       p++;
+               }
+               if (pa < ps)
+                       pa = p;
+               sz = (pa - ps) + 1;
+               pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
+                                       __alignof__(struct property));
+               if (allnextpp) {
+                       pp->name = "name";
+                       pp->length = sz;
+                       pp->value = (unsigned char *)(pp + 1);
+                       *prev_pp = pp;
+                       prev_pp = &pp->next;
+                       memcpy(pp->value, ps, sz - 1);
+                       ((char *)pp->value)[sz - 1] = 0;
+                       DBG("fixed up name for %s -> %s\n", pathp, pp->value);
+               }
+       }
+       if (allnextpp) {
+               *prev_pp = NULL;
+               np->name = get_property(np, "name", NULL);
+               np->type = get_property(np, "device_type", NULL);
+
+               if (!np->name)
+                       np->name = "<NULL>";
+               if (!np->type)
+                       np->type = "<NULL>";
+       }
+       /* recurse into child nodes until we hit this node's END_NODE tag */
+       while (tag == OF_DT_BEGIN_NODE) {
+               mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+               tag = *((u32 *)(*p));
+       }
+       if (tag != OF_DT_END_NODE) {
+               printk("Weird tag at end of node: %x\n", tag);
+               return mem;
+       }
+       *p += 4;
+       return mem;
+}
+
+
+/**
+ * unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used (this used to be done by finish_device_tree)
+ */
+void __init unflatten_device_tree(void)
+{
+       unsigned long start, mem, size;
+       struct device_node **allnextp = &allnodes;
+       char *p = NULL;
+       int l = 0;
+
+       DBG(" -> unflatten_device_tree()\n");
+
+       /* First pass, scan for size */
+       start = ((unsigned long)initial_boot_params) +
+               initial_boot_params->off_dt_struct;
+       size = unflatten_dt_node(0, &start, NULL, NULL, 0);
+       size = (size | 3) + 1;  /* bump size to the next multiple of 4 */
+
+       DBG("  size is %lx, allocating...\n", size);
+
+       /* Allocate memory for the expanded device tree */
+       mem = lmb_alloc(size + 4, __alignof__(struct device_node));
+       if (!mem) {
+               DBG("Couldn't allocate memory with lmb_alloc()!\n");
+               panic("Couldn't allocate memory with lmb_alloc()!\n");
+       }
+       mem = (unsigned long) __va(mem);
+
+       /* sanity marker just past the buffer, checked after unflattening */
+       ((u32 *)mem)[size / 4] = 0xdeadbeef;
+
+       DBG("  unflattening %lx...\n", mem);
+
+       /* Second pass, do actual unflattening */
+       start = ((unsigned long)initial_boot_params) +
+               initial_boot_params->off_dt_struct;
+       unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
+       if (*((u32 *)start) != OF_DT_END)
+               printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
+       if (((u32 *)mem)[size / 4] != 0xdeadbeef)
+               printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
+                      ((u32 *)mem)[size / 4] );
+       *allnextp = NULL;
+
+       /* Get pointer to OF "/chosen" node for use everywhere */
+       of_chosen = of_find_node_by_path("/chosen");
+
+       /* Retrieve command line */
+       if (of_chosen != NULL) {
+               p = (char *)get_property(of_chosen, "bootargs", &l);
+               if (p != NULL && l > 0)
+                       strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
+       }
+#ifdef CONFIG_CMDLINE
+       /* fall back to the build-time command line if firmware gave none */
+       if (l == 0 || (l == 1 && (*p) == 0))
+               strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif /* CONFIG_CMDLINE */
+
+       DBG("Command line is: %s\n", cmd_line);
+
+       DBG(" <- unflatten_device_tree()\n");
+}
+
+
+/* Flat-tree scan callback for "cpu" nodes: picks up the hash table
+ * size, the boot CPU id, and Altivec/SMT capabilities.  Always returns
+ * 0 so that every cpu node in the tree is visited.
+ */
+static int __init early_init_dt_scan_cpus(unsigned long node,
+                                         const char *uname, int depth, void *data)
+{
+       char *type = get_flat_dt_prop(node, "device_type", NULL);
+       u32 *prop;
+       unsigned long size = 0;
+
+       /* We are scanning "cpu" nodes only */
+       if (type == NULL || strcmp(type, "cpu") != 0)
+               return 0;
+
+#ifdef CONFIG_PPC_PSERIES
+       /* On LPAR, look for the first ibm,pft-size property for the
+        * hash table size
+        */
+       if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
+               u32 *pft_size;
+               pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
+               if (pft_size != NULL) {
+                       /* pft_size[0] is the NUMA CEC cookie */
+                       ppc64_pft_size = pft_size[1];
+               }
+       }
+#endif
+
+#ifdef CONFIG_PPC64
+       if (initial_boot_params && initial_boot_params->version >= 2) {
+               /* version 2 of the kexec param format adds the phys cpuid
+                * of booted proc.
+                */
+               boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
+               boot_cpuid = 0;
+       } else {
+               /* Check if it's the boot-cpu, set it's hw index in paca now */
+               if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
+                       prop = get_flat_dt_prop(node, "reg", NULL);
+                       set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
+                       boot_cpuid_phys = get_hard_smp_processor_id(0);
+               }
+       }
+#endif
+
+#ifdef CONFIG_ALTIVEC
+       /* Check if we have a VMX and eventually update CPU features */
+       prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
+       if (prop && (*prop) > 0) {
+               cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
+               cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+       }
+
+       /* Same goes for Apple's "altivec" property */
+       prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
+       if (prop) {
+               cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
+               cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+       }
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_PPC_PSERIES
+       /*
+        * Check for an SMT capable CPU and set the CPU feature. We do
+        * this by looking at the size of the ibm,ppc-interrupt-server#s
+        * property
+        */
+       prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
+                                      &size);
+       cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
+       if (prop && ((size / sizeof(u32)) > 1))
+               cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
+#endif
+
+       return 0;
+}
+
+/* Flat-tree scan callback for the /chosen node: platform type, iommu
+ * overrides, memory limit, TCE allocation range and RTAS location.
+ * Returns 1 (stop scanning) once /chosen has been processed.
+ */
+static int __init early_init_dt_scan_chosen(unsigned long node,
+                                           const char *uname, int depth, void *data)
+{
+       u32 *prop;
+       unsigned long *lprop;
+
+       DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+
+       if (depth != 1 || strcmp(uname, "chosen") != 0)
+               return 0;
+
+       /* get platform type */
+       prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
+       if (prop == NULL)
+               return 0;
+#ifdef CONFIG_PPC64
+       systemcfg->platform = *prop;
+#else
+       _machine = *prop;
+#endif
+
+#ifdef CONFIG_PPC64
+       /* check if iommu is forced on or off */
+       if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
+               iommu_is_off = 1;
+       if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
+               iommu_force_on = 1;
+#endif
+
+       lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
+       if (lprop)
+               memory_limit = *lprop;
+
+#ifdef CONFIG_PPC64
+       lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
+       if (lprop)
+               tce_alloc_start = *lprop;
+       lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
+       if (lprop)
+               tce_alloc_end = *lprop;
+#endif
+
+#ifdef CONFIG_PPC_RTAS
+       /* To help early debugging via the front panel, we retrieve a minimal
+        * set of RTAS infos now if available
+        */
+       {
+               u64 *basep, *entryp;
+
+               basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
+               entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
+               prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
+               if (basep && entryp && prop) {
+                       rtas.base = *basep;
+                       rtas.entry = *entryp;
+                       rtas.size = *prop;
+               }
+       }
+#endif /* CONFIG_PPC_RTAS */
+
+       /* break now */
+       return 1;
+}
+
+/* Flat-tree scan callback for the root node: records the default
+ * #address-cells / #size-cells used when parsing "reg" properties.
+ * Returns 1 to stop the scan once the root has been seen.
+ */
+static int __init early_init_dt_scan_root(unsigned long node,
+                                         const char *uname, int depth, void *data)
+{
+       u32 *cells;
+
+       /* only the root node (depth 0) is of interest */
+       if (depth != 0)
+               return 0;
+
+       cells = get_flat_dt_prop(node, "#size-cells", NULL);
+       dt_root_size_cells = cells ? *cells : 1;
+       DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
+
+       cells = get_flat_dt_prop(node, "#address-cells", NULL);
+       dt_root_addr_cells = cells ? *cells : 2;
+       DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
+
+       /* break now */
+       return 1;
+}
+
+/* Read an s-cell big-endian value from *cellp into an unsigned long,
+ * advancing *cellp past all s cells.  Cells that do not fit in an
+ * unsigned long are skipped, keeping the least significant ones.
+ */
+static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
+{
+       cell_t *p = *cellp;
+       unsigned long r;
+
+       /* Skip cells beyond what an unsigned long can hold */
+       while (s > sizeof(unsigned long) / 4) {
+               p++;
+               s--;
+       }
+       r = *p++;
+#ifdef CONFIG_PPC64
+       /* on 64-bit, two remaining cells form a 64-bit value (big-endian) */
+       if (s > 1) {
+               r <<= 32;
+               r |= *(p++);
+               s--;
+       }
+#endif
+
+       *cellp = p;
+       return r;
+}
+
+
+/* Flat-tree scan callback for "memory" nodes: parses each (base, size)
+ * pair out of the "reg" property and registers it with the LMB
+ * allocator.  Returns 0 so every memory node is visited.
+ */
+static int __init early_init_dt_scan_memory(unsigned long node,
+                                           const char *uname, int depth, void *data)
+{
+       char *type = get_flat_dt_prop(node, "device_type", NULL);
+       cell_t *reg, *endp;
+       unsigned long l;
+
+       /* We are scanning "memory" nodes only */
+       if (type == NULL || strcmp(type, "memory") != 0)
+               return 0;
+
+       reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
+       if (reg == NULL)
+               return 0;
+
+       endp = reg + (l / sizeof(cell_t));
+
+       DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
+           uname, l, reg[0], reg[1], reg[2], reg[3]);
+
+       while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+               unsigned long base, size;
+
+               base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+               size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+               if (size == 0)
+                       continue;
+               DBG(" - %lx ,  %lx\n", base, size);
+#ifdef CONFIG_PPC64
+               /* without an iommu, only memory below 2GB is usable: clamp */
+               if (iommu_is_off) {
+                       if (base >= 0x80000000ul)
+                               continue;
+                       if ((base + size) > 0x80000000ul)
+                               size = 0x80000000ul - base;
+               }
+#endif
+               lmb_add(base, size);
+       }
+       return 0;
+}
+
+static void __init early_reserve_mem(void)
+{
+       unsigned long base, size;
+       unsigned long *reserve_map;
+
+       reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
+                                       initial_boot_params->off_mem_rsvmap);
+       while (1) {
+               base = *(reserve_map++);
+               size = *(reserve_map++);
+               if (size == 0)
+                       break;
+               DBG("reserving: %lx -> %lx\n", base, size);
+               lmb_reserve(base, size);
+       }
+
+#if 0
+       DBG("memory reserved, lmbs :\n");
+       lmb_dump_all();
+#endif
+}
+
+/* Very early entry point: parse the flattened device-tree at @params
+ * to set up the platform type, memory map (LMB) and CPU information
+ * before the tree is unflattened.
+ */
+void __init early_init_devtree(void *params)
+{
+       DBG(" -> early_init_devtree()\n");
+
+       /* Setup flat device-tree pointer */
+       initial_boot_params = params;
+
+       /* Retrieve various information from the /chosen node of the
+        * device-tree, including the platform type, initrd location and
+        * size, TCE reserve, and more ...
+        */
+       scan_flat_dt(early_init_dt_scan_chosen, NULL);
+
+       /* Scan memory nodes and rebuild LMBs */
+       lmb_init();
+       scan_flat_dt(early_init_dt_scan_root, NULL);
+       scan_flat_dt(early_init_dt_scan_memory, NULL);
+       lmb_enforce_memory_limit(memory_limit);
+       lmb_analyze();
+#ifdef CONFIG_PPC64
+       systemcfg->physicalMemorySize = lmb_phys_mem_size();
+#endif
+       /* protect the kernel text/data/bss from allocation */
+       lmb_reserve(0, __pa(klimit));
+
+       DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+
+       /* Reserve LMB regions used by kernel, initrd, dt, etc... */
+       early_reserve_mem();
+
+       DBG("Scanning CPUs ...\n");
+
+       /* Retrieve hash table size from flattened tree plus other
+        * CPU related information (altivec support, boot CPU ID, ...)
+        */
+       scan_flat_dt(early_init_dt_scan_cpus, NULL);
+
+       DBG(" <- early_init_devtree()\n");
+}
+
+#undef printk
+
+/* Number of address cells for @np.  Per OF convention the value lives
+ * in the node's parent, so walk towards the root until it is found.
+ */
+int
+prom_n_addr_cells(struct device_node* np)
+{
+       for (;;) {
+               int *cells;
+
+               if (np->parent)
+                       np = np->parent;
+               cells = (int *) get_property(np, "#address-cells", NULL);
+               if (cells != NULL)
+                       return *cells;
+               if (np->parent == NULL)
+                       break;
+       }
+       /* No #address-cells property for the root node, default to 1 */
+       return 1;
+}
+
+/* Number of size cells for @np; same parent-walking logic as
+ * prom_n_addr_cells() but for the "#size-cells" property.
+ */
+int
+prom_n_size_cells(struct device_node* np)
+{
+       for (;;) {
+               int *cells;
+
+               if (np->parent)
+                       np = np->parent;
+               cells = (int *) get_property(np, "#size-cells", NULL);
+               if (cells != NULL)
+                       return *cells;
+               if (np->parent == NULL)
+                       break;
+       }
+       /* No #size-cells property for the root node, default to 1 */
+       return 1;
+}
+
+/**
+ * Work out the sense (active-low level / active-high edge)
+ * of each interrupt from the device tree.
+ */
+void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
+{
+       struct device_node *np;
+       int j;
+
+       /* default to level-triggered */
+       memset(senses, 1, max - off);
+
+       for (np = allnodes; np != 0; np = np->allnext) {
+               for (j = 0; j < np->n_intrs; j++) {
+                       int line = np->intrs[j].line;
+
+                       if (line < off || line >= max)
+                               continue;
+                       senses[line - off] = np->intrs[j].sense ?
+                               IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
+                               IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
+               }
+       }
+}
+
+/**
+ * Construct and return a list of the device_nodes with a given name,
+ * linked through their 'next' pointers.
+ */
+struct device_node *find_devices(const char *name)
+{
+       struct device_node *head = NULL;
+       struct device_node **tail = &head;
+       struct device_node *np;
+
+       for (np = allnodes; np != NULL; np = np->allnext) {
+               if (np->name == NULL || strcasecmp(np->name, name) != 0)
+                       continue;
+               *tail = np;
+               tail = &np->next;
+       }
+       *tail = NULL;
+       return head;
+}
+EXPORT_SYMBOL(find_devices);
+
+/**
+ * Construct and return a list of the device_nodes with a given type,
+ * linked through their 'next' pointers.
+ */
+struct device_node *find_type_devices(const char *type)
+{
+       struct device_node *head = NULL;
+       struct device_node **tail = &head;
+       struct device_node *np;
+
+       for (np = allnodes; np != NULL; np = np->allnext) {
+               if (np->type == NULL || strcasecmp(np->type, type) != 0)
+                       continue;
+               *tail = np;
+               tail = &np->next;
+       }
+       *tail = NULL;
+       return head;
+}
+EXPORT_SYMBOL(find_type_devices);
+
+/**
+ * Returns all nodes linked together through their 'next' pointers.
+ */
+struct device_node *find_all_nodes(void)
+{
+       struct device_node *head = NULL;
+       struct device_node **tail = &head;
+       struct device_node *np;
+
+       for (np = allnodes; np != NULL; np = np->allnext) {
+               *tail = np;
+               tail = &np->next;
+       }
+       *tail = NULL;
+       return head;
+}
+EXPORT_SYMBOL(find_all_nodes);
+
+/** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property.  Note that strncasecmp() with
+ * strlen(compat) means @compat also matches as a *prefix* of an entry.
+ */
+int device_is_compatible(struct device_node *device, const char *compat)
+{
+       const char *cp;
+       int cplen;
+
+       cp = (char *) get_property(device, "compatible", &cplen);
+       if (cp == NULL)
+               return 0;
+       /* "compatible" is a list of NUL-terminated strings; try each one */
+       while (cplen > 0) {
+               int l;
+
+               if (strncasecmp(cp, compat, strlen(compat)) == 0)
+                       return 1;
+               l = strlen(cp) + 1;
+               cp += l;
+               cplen -= l;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(device_is_compatible);
+
+
+/**
+ * Indicates whether the root node has a given value in its
+ * compatible property.
+ */
+int machine_is_compatible(const char *compat)
+{
+       struct device_node *root = of_find_node_by_path("/");
+       int rc = 0;
+
+       if (root != NULL) {
+               rc = device_is_compatible(root, compat);
+               of_node_put(root);
+       }
+       return rc;
+}
+EXPORT_SYMBOL(machine_is_compatible);
+
+/**
+ * Construct and return a list of the device_nodes with a given type
+ * and compatible property, linked through their 'next' pointers.
+ */
+struct device_node *find_compatible_devices(const char *type,
+                                           const char *compat)
+{
+       struct device_node *head = NULL;
+       struct device_node **tail = &head;
+       struct device_node *np;
+
+       for (np = allnodes; np != NULL; np = np->allnext) {
+               if (type != NULL
+                   && !(np->type != 0 && strcasecmp(np->type, type) == 0))
+                       continue;
+               if (!device_is_compatible(np, compat))
+                       continue;
+               *tail = np;
+               tail = &np->next;
+       }
+       *tail = NULL;
+       return head;
+}
+EXPORT_SYMBOL(find_compatible_devices);
+
+/**
+ * Find the device_node with a given full_name, or NULL if none matches.
+ */
+struct device_node *find_path_device(const char *path)
+{
+       struct device_node *np;
+
+       for (np = allnodes; np != NULL; np = np->allnext) {
+               if (np->full_name == NULL)
+                       continue;
+               if (strcasecmp(np->full_name, path) == 0)
+                       return np;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(find_path_device);
+
+/*******
+ *
+ * New implementation of the OF "find" APIs, return a refcounted
+ * object, call of_node_put() when done.  The device tree and list
+ * are protected by a rw_lock.
+ *
+ * Note that property management will need some locking as well,
+ * this isn't dealt with yet.
+ *
+ *******/
+
+/**
+ *     of_find_node_by_name - Find a node by its "name" property
+ *     @from:  The node to start searching from or NULL, the node
+ *             you pass will not be searched, only the next one
+ *             will; typically, you pass what the previous call
+ *             returned. of_node_put() will be called on it
+ *     @name:  The name string to match against
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_name(struct device_node *from,
+       const char *name)
+{
+       struct device_node *np;
+
+       read_lock(&devtree_lock);
+       for (np = from ? from->allnext : allnodes; np != 0; np = np->allnext) {
+               if (np->name == 0 || strcasecmp(np->name, name) != 0)
+                       continue;
+               if (of_node_get(np))
+                       break;
+       }
+       if (from)
+               of_node_put(from);
+       read_unlock(&devtree_lock);
+       return np;
+}
+EXPORT_SYMBOL(of_find_node_by_name);
+
+/**
+ *     of_find_node_by_type - Find a node by its "device_type" property
+ *     @from:  The node to start searching from or NULL, the node
+ *             you pass will not be searched, only the next one
+ *             will; typically, you pass what the previous call
+ *             returned. of_node_put() will be called on it
+ *     @type:  The type string to match against
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_type(struct device_node *from,
+       const char *type)
+{
+       struct device_node *np;
+
+       read_lock(&devtree_lock);
+       for (np = from ? from->allnext : allnodes; np != 0; np = np->allnext) {
+               if (np->type == 0 || strcasecmp(np->type, type) != 0)
+                       continue;
+               if (of_node_get(np))
+                       break;
+       }
+       if (from)
+               of_node_put(from);
+       read_unlock(&devtree_lock);
+       return np;
+}
+EXPORT_SYMBOL(of_find_node_by_type);
+
+/**
+ *     of_find_compatible_node - Find a node based on type and one of the
+ *                                tokens in its "compatible" property
+ *     @from:          The node to start searching from or NULL, the node
+ *                     you pass will not be searched, only the next one
+ *                     will; typically, you pass what the previous call
+ *                     returned. of_node_put() will be called on it
+ *     @type:          The type string to match "device_type" or NULL to ignore
+ *     @compatible:    The string to match to one of the tokens in the device
+ *                     "compatible" list.
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_find_compatible_node(struct device_node *from,
+       const char *type, const char *compatible)
+{
+       struct device_node *np;
+
+       read_lock(&devtree_lock);
+       for (np = from ? from->allnext : allnodes; np != 0; np = np->allnext) {
+               if (type != NULL
+                   && !(np->type != 0 && strcasecmp(np->type, type) == 0))
+                       continue;
+               if (!device_is_compatible(np, compatible))
+                       continue;
+               if (of_node_get(np))
+                       break;
+       }
+       if (from)
+               of_node_put(from);
+       read_unlock(&devtree_lock);
+       return np;
+}
+EXPORT_SYMBOL(of_find_compatible_node);
+
+/**
+ *     of_find_node_by_path - Find a node matching a full OF path
+ *     @path:  The full path to match
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_path(const char *path)
+{
+       struct device_node *np;
+
+       /* read 'allnodes' under devtree_lock, like the other finders do,
+        * rather than sampling the list head before taking the lock
+        */
+       read_lock(&devtree_lock);
+       for (np = allnodes; np != 0; np = np->allnext) {
+               if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
+                   && of_node_get(np))
+                       break;
+       }
+       read_unlock(&devtree_lock);
+       return np;
+}
+EXPORT_SYMBOL(of_find_node_by_path);
+
+/**
+ *     of_find_node_by_phandle - Find a node given a phandle
+ *     @handle:        phandle of the node to find
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_phandle(phandle handle)
+{
+       struct device_node *np;
+
+       read_lock(&devtree_lock);
+       for (np = allnodes; np != NULL; np = np->allnext)
+               if (np->linux_phandle == handle)
+                       break;
+       /* of_node_get() copes with a NULL argument */
+       of_node_get(np);
+       read_unlock(&devtree_lock);
+       return np;
+}
+EXPORT_SYMBOL(of_find_node_by_phandle);
+
+/**
+ *     of_find_all_nodes - Get next node in global list
+ *     @prev:  Previous node or NULL to start iteration
+ *             of_node_put() will be called on it
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_find_all_nodes(struct device_node *prev)
+{
+       struct device_node *np;
+
+       read_lock(&devtree_lock);
+       for (np = prev ? prev->allnext : allnodes; np != NULL; np = np->allnext)
+               if (of_node_get(np))
+                       break;
+       if (prev)
+               of_node_put(prev);
+       read_unlock(&devtree_lock);
+       return np;
+}
+EXPORT_SYMBOL(of_find_all_nodes);
+
+/**
+ *     of_get_parent - Get a node's parent if any
+ *     @node:  Node to get parent
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_get_parent(const struct device_node *node)
+{
+       struct device_node *parent;
+
+       if (node == NULL)
+               return NULL;
+
+       read_lock(&devtree_lock);
+       parent = of_node_get(node->parent);
+       read_unlock(&devtree_lock);
+       return parent;
+}
+EXPORT_SYMBOL(of_get_parent);
+
+/**
+ *     of_get_next_child - Iterate over a node's children
+ *     @node:  parent node
+ *     @prev:  previous child of the parent node, or NULL to get first
+ *
+ *     Returns a node pointer with refcount incremented, use
+ *     of_node_put() on it when done.
+ */
+struct device_node *of_get_next_child(const struct device_node *node,
+       struct device_node *prev)
+{
+       struct device_node *child;
+
+       read_lock(&devtree_lock);
+       child = prev ? prev->sibling : node->child;
+       while (child != NULL && !of_node_get(child))
+               child = child->sibling;
+       if (prev)
+               of_node_put(prev);
+       read_unlock(&devtree_lock);
+       return child;
+}
+EXPORT_SYMBOL(of_get_next_child);
+
+/**
+ *     of_node_get - Increment refcount of a node
+ *     @node:  Node to inc refcount, NULL is supported to
+ *             simplify writing of callers
+ *
+ *     Returns node.
+ */
+struct device_node *of_node_get(struct device_node *node)
+{
+       if (node)
+               kref_get(&node->kref);
+       return node;
+}
+EXPORT_SYMBOL(of_node_get);
+
/* Map an embedded kref back to the device_node that contains it. */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}
+
/**
 *	of_node_release - release a dynamically allocated node
 *	@kref:	kref element of the node to be released
 *
 *	In of_node_put() this function is passed to kref_put()
 *	as the destructor.  Statically allocated (boot-time) nodes
 *	are never freed: for those this is a no-op.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	if (!OF_IS_DYNAMIC(node))
		return;
	/* Free the whole property list; each property owns its
	 * name and value buffers. */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;
	}
	/* Then the node's own allocations, then the node itself. */
	kfree(node->intrs);
	kfree(node->addrs);
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
+
+/**
+ *     of_node_put - Decrement refcount of a node
+ *     @node:  Node to dec refcount, NULL is supported to
+ *             simplify writing of callers
+ *
+ */
+void of_node_put(struct device_node *node)
+{
+       if (node)
+               kref_put(&node->kref, of_node_release);
+}
+EXPORT_SYMBOL(of_node_put);
+
/*
 * Plug a device node into the tree and global list.
 * The node is inserted at the head of its parent's child list
 * and at the head of the global allnodes list, under the
 * devtree write lock.  Caller must have set np->parent.
 */
void of_attach_node(struct device_node *np)
{
	write_lock(&devtree_lock);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock(&devtree_lock);
}
+
/*
 * "Unplug" a node from the device tree.  The caller must hold
 * a reference to the node.  The memory associated with the node
 * is not freed until its refcount goes to zero.
 *
 * Note: the unlinking loops below assume the node is actually
 * present in both the allnodes list and its parent's child list;
 * they would dereference NULL otherwise.
 */
void of_detach_node(const struct device_node *np)
{
	struct device_node *parent;

	write_lock(&devtree_lock);

	parent = np->parent;

	/* Unlink from the global allnodes list. */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* Unlink from the parent's sibling (child) list. */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	write_unlock(&devtree_lock);
}
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Fix up the uninitialized fields in a new device node:
+ * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
+ *
+ * A lot of boot-time code is duplicated here, because functions such
+ * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
+ * slab allocator.
+ *
+ * This should probably be split up into smaller chunks.
+ */
+
+static int of_finish_dynamic_node(struct device_node *node,
+                                 unsigned long *unused1, int unused2,
+                                 int unused3, int unused4)
+{
+       struct device_node *parent = of_get_parent(node);
+       int err = 0;
+       phandle *ibm_phandle;
+
+       node->name = get_property(node, "name", NULL);
+       node->type = get_property(node, "device_type", NULL);
+
+       if (!parent) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       /* We don't support that function on PowerMac, at least
+        * not yet
+        */
+       if (systemcfg->platform == PLATFORM_POWERMAC)
+               return -ENODEV;
+
+       /* fix up new node's linux_phandle field */
+       if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
+               node->linux_phandle = *ibm_phandle;
+
+out:
+       of_node_put(parent);
+       return err;
+}
+
/*
 * pSeries reconfiguration notifier: when a node is added via the
 * dynamic-reconfiguration path, run of_finish_dynamic_node() on it
 * (through finish_node()) to fill in the derived fields.
 */
static int prom_reconfig_notifier(struct notifier_block *nb,
				  unsigned long action, void *node)
{
	int err;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
		if (err < 0) {
			printk(KERN_ERR "finish_node returned %d\n", err);
			err = NOTIFY_BAD;
		}
		break;
	default:
		/* Other reconfig events need no fixup here. */
		err = NOTIFY_DONE;
		break;
	}
	return err;
}
+
static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

/* Register the reconfig notifier at boot. */
static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
+#endif
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
+unsigned char *get_property(struct device_node *np, const char *name,
+                           int *lenp)
+{
+       struct property *pp;
+
+       for (pp = np->properties; pp != 0; pp = pp->next)
+               if (strcmp(pp->name, name) == 0) {
+                       if (lenp != 0)
+                               *lenp = pp->length;
+                       return pp->value;
+               }
+       return NULL;
+}
+EXPORT_SYMBOL(get_property);
+
+/*
+ * Add a property to a node
+ */
+void prom_add_property(struct device_node* np, struct property* prop)
+{
+       struct property **next = &np->properties;
+
+       prop->next = NULL;      
+       while (*next)
+               next = &(*next)->next;
+       *next = prop;
+}
+
+/* I quickly hacked that one, check against spec ! */
+static inline unsigned long
+bus_space_to_resource_flags(unsigned int bus_space)
+{
+       u8 space = (bus_space >> 24) & 0xf;
+       if (space == 0)
+               space = 0x02;
+       if (space == 0x02)
+               return IORESOURCE_MEM;
+       else if (space == 0x01)
+               return IORESOURCE_IO;
+       else {
+               printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
+                       bus_space);
+               return 0;
+       }
+}
+
/*
 * Find the PCI BAR resource of @pdev that contains the start of the
 * OF address @range.  Returns NULL if no BAR matches, or if the OF
 * range spills past the end of the matching BAR.
 */
static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
						 struct address_range *range)
{
	unsigned long mask;
	int i;

	/* Check this one */
	mask = bus_space_to_resource_flags(range->space);
	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
		/* NOTE(review): resource .end is inclusive; the strict
		 * "end > range->address" excludes a range starting at the
		 * BAR's last byte -- confirm that is intended. */
		if ((pdev->resource[i].flags & mask) == mask &&
			pdev->resource[i].start <= range->address &&
			pdev->resource[i].end > range->address) {
				if ((range->address + range->size - 1) > pdev->resource[i].end) {
					/* Add better message */
					printk(KERN_WARNING "PCI/OF resource overlap !\n");
					return NULL;
				}
				break;
			}
	}
	if (i == DEVICE_COUNT_RESOURCE)
		return NULL;
	return &pdev->resource[i];
}
+
/*
 * Request an OF device resource. Currently handles child of PCI devices,
 * or other nodes attached to the root node. Ultimately, put some
 * link to resources in the OF node.
 *
 * On success returns the claimed resource; the resource's name is a
 * kmalloc'd copy of the node name plus @name_postfix, freed again by
 * release_OF_resource().  Returns NULL on any failure.
 */
struct resource *request_OF_resource(struct device_node* node, int index,
				     const char* name_postfix)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;
	int nlen, plen;

	if (index >= node->n_addrs)
		goto fail;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		goto fail;

	/* Find a PCI parent if any: walk up the tree until a node
	 * maps to a PCI device. */
	nd = node;
	pcidev = NULL;
	while (nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	/* Under a PCI device, claim inside the matching BAR instead of
	 * the root iomem/ioport resource. */
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
			node->name);
		goto fail;
	}

	res = __request_region(parent, node->addrs[index].address,
			       node->addrs[index].size, NULL);
	if (!res)
		goto fail;
	/* Build "nodename[postfix]" as the resource name; allocation
	 * failure is tolerated (name simply stays NULL). */
	nlen = strlen(node->name);
	plen = name_postfix ? strlen(name_postfix) : 0;
	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
	if (res->name) {
		strcpy((char *)res->name, node->name);
		if (plen)
			strcpy((char *)res->name+nlen, name_postfix);
	}
	return res;
fail:
	return NULL;
}
EXPORT_SYMBOL(request_OF_resource);
+
/*
 * Undo request_OF_resource(): locate the busy resource that was
 * claimed for @node's address @index, free its kmalloc'd name and
 * release it.  Returns 0 on success, -EINVAL for a bad index or
 * bus space, -ENODEV if the parent or resource cannot be found.
 */
int release_OF_resource(struct device_node *node, int index)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask, start, end;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;

	if (index >= node->n_addrs)
		return -EINVAL;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		return -EINVAL;

	/* Find a PCI parent if any (same walk as request_OF_resource) */
	nd = node;
	pcidev = NULL;
	while(nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
			node->name);
		return -ENODEV;
	}

	/* Find us in the parent and its childs: descend into any
	 * enclosing region, stop at the exact busy match. */
	res = parent->child;
	start = node->addrs[index].address;
	end = start + node->addrs[index].size - 1;
	while (res) {
		if (res->start == start && res->end == end &&
		    (res->flags & IORESOURCE_BUSY))
			break;
		if (res->start <= start && res->end >= end)
			res = res->child;
		else
			res = res->sibling;
	}
	if (!res)
		return -ENODEV;

	/* Free the name allocated by request_OF_resource().
	 * NOTE(review): kfree() of a const char * relies on an
	 * implicit const cast; the NULL guard is also redundant. */
	if (res->name) {
		kfree(res->name);
		res->name = NULL;
	}
	release_resource(res);
	kfree(res);

	return 0;
}
EXPORT_SYMBOL(release_OF_resource);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
new file mode 100644 (file)
index 0000000..095659d
--- /dev/null
@@ -0,0 +1,2065 @@
+/*
+ * Procedures for interfacing to Open Firmware.
+ *
+ * Paul Mackerras      August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ * 
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com 
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG_PROM
+
+#include <stdarg.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/bitops.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/pci.h>
+#include <asm/iommu.h>
+#include <asm/bootinfo.h>
+#include <asm/btext.h>
+#include <asm/sections.h>
+#include <asm/machdep.h>
+
+#ifdef CONFIG_LOGO_LINUX_CLUT224
+#include <linux/linux_logo.h>
+extern const struct linux_logo logo_linux_clut224;
+#endif
+
+/*
+ * Properties whose value is longer than this get excluded from our
+ * copy of the device tree. This value does need to be big enough to
+ * ensure that we don't lose things like the interrupt-map property
+ * on a PCI-PCI bridge.
+ */
+#define MAX_PROPERTY_LENGTH    (1UL * 1024 * 1024)
+
+/*
+ * Eventually bump that one up
+ */
+#define DEVTREE_CHUNK_SIZE     0x100000
+
+/*
+ * This is the size of the local memory reserve map that gets copied
+ * into the boot params passed to the kernel. That size is totally
+ * flexible as the kernel just reads the list until it encounters an
+ * entry with size 0, so it can be changed without breaking binary
+ * compatibility
+ */
+#define MEM_RESERVE_MAP_SIZE   8
+
+/*
+ * prom_init() is called very early on, before the kernel text
+ * and data have been mapped to KERNELBASE.  At this point the code
+ * is running at whatever address it has been loaded at.
+ * On ppc32 we compile with -mrelocatable, which means that references
+ * to extern and static variables get relocated automatically.
+ * On ppc64 we have to relocate the references explicitly with
+ * RELOC.  (Note that strings count as static variables.)
+ *
+ * Because OF may have mapped I/O devices into the area starting at
+ * KERNELBASE, particularly on CHRP machines, we can't safely call
+ * OF once the kernel has been mapped to KERNELBASE.  Therefore all
+ * OF calls must be done within prom_init().
+ *
+ * ADDR is used in calls to call_prom.  The 4th and following
+ * arguments to call_prom should be 32-bit values.
+ * On ppc64, 64 bit values are truncated to 32 bits (and
+ * fortunately don't get interpreted as two arguments).
+ */
+#ifdef CONFIG_PPC64
+#define RELOC(x)        (*PTRRELOC(&(x)))
+#define ADDR(x)                (u32) add_reloc_offset((unsigned long)(x))
+#else
+#define RELOC(x)       (x)
+#define ADDR(x)                (u32) (x)
+#endif
+
+#define PROM_BUG() do {                                                \
+        prom_printf("kernel BUG at %s line 0x%x!\n",           \
+                   RELOC(__FILE__), __LINE__);                 \
+        __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);      \
+} while (0)
+
+#ifdef DEBUG_PROM
+#define prom_debug(x...)       prom_printf(x)
+#else
+#define prom_debug(x...)
+#endif
+
+#ifdef CONFIG_PPC32
+#define PLATFORM_POWERMAC      _MACH_Pmac
+#define PLATFORM_CHRP          _MACH_chrp
+#endif
+
+
+typedef u32 prom_arg_t;
+
+struct prom_args {
+        u32 service;
+        u32 nargs;
+        u32 nret;
+        prom_arg_t args[10];
+};
+
+struct prom_t {
+       ihandle root;
+       ihandle chosen;
+       int cpu;
+       ihandle stdout;
+};
+
+struct mem_map_entry {
+       unsigned long   base;
+       unsigned long   size;
+};
+
+typedef u32 cell_t;
+
+extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
+
+#ifdef CONFIG_PPC64
+extern void enter_prom(struct prom_args *args, unsigned long entry);
+#else
+static inline void enter_prom(struct prom_args *args, unsigned long entry)
+{
+       ((void (*)(struct prom_args *))entry)(args);
+}
+#endif
+
+extern void copy_and_flush(unsigned long dest, unsigned long src,
+                          unsigned long size, unsigned long offset);
+
+/* prom structure */
+static struct prom_t __initdata prom;
+
+static unsigned long prom_entry __initdata;
+
+#define PROM_SCRATCH_SIZE 256
+
+static char __initdata of_stdout_device[256];
+static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
+
+static unsigned long __initdata dt_header_start;
+static unsigned long __initdata dt_struct_start, dt_struct_end;
+static unsigned long __initdata dt_string_start, dt_string_end;
+
+static unsigned long __initdata prom_initrd_start, prom_initrd_end;
+
+#ifdef CONFIG_PPC64
+static int __initdata iommu_force_on;
+static int __initdata ppc64_iommu_off;
+static unsigned long __initdata prom_tce_alloc_start;
+static unsigned long __initdata prom_tce_alloc_end;
+#endif
+
+static int __initdata of_platform;
+
+static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
+
+static unsigned long __initdata prom_memory_limit;
+
+static unsigned long __initdata alloc_top;
+static unsigned long __initdata alloc_top_high;
+static unsigned long __initdata alloc_bottom;
+static unsigned long __initdata rmo_top;
+static unsigned long __initdata ram_top;
+
+static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
+static int __initdata mem_reserve_cnt;
+
+static cell_t __initdata regbuf[1024];
+
+
+#define MAX_CPU_THREADS 2
+
+/* TO GO */
+#ifdef CONFIG_HMT
+struct {
+       unsigned int pir;
+       unsigned int threadid;
+} hmt_thread_data[NR_CPUS];
+#endif /* CONFIG_HMT */
+
+/*
+ * Error results ... some OF calls will return "-1" on error, some
+ * will return 0, some will return either. To simplify, here are
+ * macros to use with any ihandle or phandle return value to check if
+ * it is valid
+ */
+
+#define PROM_ERROR             (-1u)
+#define PHANDLE_VALID(p)       ((p) != 0 && (p) != PROM_ERROR)
+#define IHANDLE_VALID(i)       ((i) != 0 && (i) != PROM_ERROR)
+
+
+/* This is the one and *ONLY* place where we actually call open
+ * firmware.
+ */
+
/*
 * Marshal an Open Firmware client-interface call: the first @nargs
 * varargs are the input arguments (each a 32-bit prom_arg_t), the
 * @nret result slots are zeroed, then we trap into OF.  Returns the
 * first result slot, or 0 when nret == 0.
 */
static int __init call_prom(const char *service, int nargs, int nret, ...)
{
	int i;
	struct prom_args args;
	va_list list;

	args.service = ADDR(service);
	args.nargs = nargs;
	args.nret = nret;

	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		args.args[i] = va_arg(list, prom_arg_t);
	va_end(list);

	/* Zero the result slots so stale data can't leak through. */
	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	enter_prom(&args, RELOC(prom_entry));

	return (nret > 0) ? args.args[nargs] : 0;
}
+
+static int __init call_prom_ret(const char *service, int nargs, int nret,
+                               prom_arg_t *rets, ...)
+{
+       int i;
+       struct prom_args args;
+       va_list list;
+
+       args.service = ADDR(service);
+       args.nargs = nargs;
+       args.nret = nret;
+
+       va_start(list, rets);
+       for (i = 0; i < nargs; i++)
+               args.args[i] = va_arg(list, prom_arg_t);
+       va_end(list);
+
+       for (i = 0; i < nret; i++)
+               rets[nargs+i] = 0;
+
+       enter_prom(&args, RELOC(prom_entry));
+
+       if (rets != NULL)
+               for (i = 1; i < nret; ++i)
+                       rets[i-1] = args.args[nargs+i];
+
+       return (nret > 0) ? args.args[nargs] : 0;
+}
+
+
+static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
+                               unsigned long align)
+{
+       return (unsigned int)call_prom("claim", 3, 1,
+                                      (prom_arg_t)virt, (prom_arg_t)size,
+                                      (prom_arg_t)align);
+}
+
/*
 * Write a string to the OF stdout, one line segment at a time,
 * translating each '\n' into "\r\n" for the firmware console.
 * Silently does nothing before stdout has been resolved.
 */
static void __init prom_print(const char *msg)
{
	const char *p, *q;
	struct prom_t *_prom = &RELOC(prom);

	if (_prom->stdout == 0)
		return;

	for (p = msg; *p != 0; p = q) {
		/* q scans to the end of the current segment. */
		for (q = p; *q != 0 && *q != '\n'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, _prom->stdout, p, q - p);
		if (*q == 0)
			break;
		++q;
		call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
	}
}
+
+
/*
 * Print @val in fixed-width lowercase hex (2 nibbles per byte of
 * unsigned long) on the OF stdout.  No "0x" prefix is emitted.
 */
static void __init prom_print_hex(unsigned long val)
{
	int i, nibbles = sizeof(val)*2;
	char buf[sizeof(val)*2+1];
	struct prom_t *_prom = &RELOC(prom);

	/* Fill the buffer from the least significant nibble. */
	for (i = nibbles-1;  i >= 0;  i--) {
		buf[i] = (val & 0xf) + '0';
		if (buf[i] > '9')
			buf[i] += ('a'-'0'-10);
		val >>= 4;
	}
	buf[nibbles] = '\0';
	call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
}
+
+
/*
 * Minimal printf for use before the kernel console exists.
 * Supports only %s and %x conversions (%x takes an unsigned long);
 * any other conversion character is silently skipped.  '\n' is
 * translated to "\r\n" for the firmware console.
 */
static void __init prom_printf(const char *format, ...)
{
	const char *p, *q, *s;
	va_list args;
	unsigned long v;
	struct prom_t *_prom = &RELOC(prom);

	va_start(args, format);
#ifdef CONFIG_PPC64
	/* The format string is a static object; relocate its address. */
	format = PTRRELOC(format);
#endif
	for (p = format; *p != 0; p = q) {
		/* Emit the literal run up to the next '\n' or '%'. */
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, _prom->stdout, p, q - p);
		if (*q == 0)
			break;
		if (*q == '\n') {
			++q;
			call_prom("write", 3, 1, _prom->stdout,
				  ADDR("\r\n"), 2);
			continue;
		}
		++q;
		if (*q == 0)
			break;
		switch (*q) {
		case 's':
			++q;
			s = va_arg(args, const char *);
			prom_print(s);
			break;
		case 'x':
			++q;
			v = va_arg(args, unsigned long);
			prom_print_hex(v);
			break;
		}
	}
}
+
+
/*
 * Print @reason on the OF console and exit to the firmware;
 * never returns (the loop guards against "exit" coming back).
 */
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
{
#ifdef CONFIG_PPC64
	/* reason is a static string; relocate its address. */
	reason = PTRRELOC(reason);
#endif
	prom_print(reason);
	/* ToDo: should put up an SRC here on p/iSeries */
	call_prom("exit", 0, 0);

	for (;;)			/* should never get here */
		;
}
+
+
/*
 * Advance *nodep to the next node in a pre-order walk of the OF
 * device tree: first child, else peer, else the nearest ancestor's
 * peer.  Returns 1 with *nodep updated, or 0 when the walk is done.
 */
static int __init prom_next_node(phandle *nodep)
{
	phandle node;

	if ((node = *nodep) != 0
	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
		return 1;
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
		return 1;
	for (;;) {
		if ((node = call_prom("parent", 1, 1, node)) == 0)
			return 0;
		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
			return 1;
	}
}
+
/* Read a property into @value (at most @valuelen bytes); returns the
 * OF "getprop" result (actual length, or -1 on failure). */
static int __init prom_getprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}

/* Return the length of a property, or -1 if it does not exist. */
static int __init prom_getproplen(phandle node, const char *pname)
{
	return call_prom("getproplen", 2, 1, node, ADDR(pname));
}

/* Create or update a property from @value; returns the OF
 * "setprop" result. */
static int __init prom_setprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	return call_prom("setprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}
+
+/* We can't use the standard versions because of RELOC headaches. */
+#define isxdigit(c)    (('0' <= (c) && (c) <= '9') \
+                        || ('a' <= (c) && (c) <= 'f') \
+                        || ('A' <= (c) && (c) <= 'F'))
+
+#define isdigit(c)     ('0' <= (c) && (c) <= '9')
+#define islower(c)     ('a' <= (c) && (c) <= 'z')
+#define toupper(c)     (islower(c) ? ((c) - 'a' + 'A') : (c))
+
/*
 * Convert a numeric string to unsigned long, auto-detecting the
 * base like simple_strtoul: "0x"/"0X" prefix means hex, a bare
 * leading '0' means octal, otherwise decimal.  If @endp is
 * non-NULL it is set to the first unconsumed character.
 */
unsigned long prom_strtoul(const char *cp, const char **endp)
{
	unsigned long res = 0, b = 10, d;

	if (*cp == '0') {
		b = 8;
		cp++;
		if (toupper(*cp) == 'X') {
			b = 16;
			cp++;
		}
	}

	for (; isxdigit(*cp); cp++) {
		d = isdigit(*cp) ? (unsigned long)(*cp - '0')
				 : (unsigned long)(toupper(*cp) - 'A' + 10);
		/* Stop at the first digit not valid in this base. */
		if (d >= b)
			break;
		res = res * b + d;
	}

	if (endp)
		*endp = cp;

	return res;
}
+
/*
 * Parse a memory size like "256M" or "1g": a number (any base
 * prom_strtoul accepts) optionally followed by a K/M/G suffix,
 * which is consumed and applied as a binary shift.  *retptr is
 * left just past the parsed text.
 */
unsigned long prom_memparse(const char *ptr, const char **retptr)
{
	unsigned long val = prom_strtoul(ptr, retptr);
	int shift = 0;
	char suffix = **retptr;

	/*
	 * Deliberately not a switch: GCC *may* generate a jump
	 * table, which won't work because we're not running at
	 * the address we're linked at.
	 */
	if (suffix == 'G' || suffix == 'g')
		shift = 30;
	if (suffix == 'M' || suffix == 'm')
		shift = 20;
	if (suffix == 'K' || suffix == 'k')
		shift = 10;

	if (shift) {
		val <<= shift;
		(*retptr)++;
	}

	return val;
}
+
/*
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu.  Fetches the
 * "bootargs" property from /chosen (falling back to CONFIG_CMDLINE
 * when empty) into prom_cmd_line, then scans it.
 */
static void __init early_cmdline_parse(void)
{
	struct prom_t *_prom = &RELOC(prom);
	char *opt, *p;
	int l = 0;

	RELOC(prom_cmd_line[0]) = 0;
	p = RELOC(prom_cmd_line);
	if ((long)_prom->chosen > 0)
		l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
	if (l == 0) /* dbl check */
		strlcpy(RELOC(prom_cmd_line),
			RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
	prom_printf("command line: %s\n", RELOC(prom_cmd_line));

#ifdef CONFIG_PPC64
	/* "iommu=off" disables the iommu, "iommu=force" always uses it. */
	opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
	if (opt) {
		prom_printf("iommu opt is: %s\n", opt);
		opt += 6;
		while (*opt && *opt == ' ')
			opt++;
		if (!strncmp(opt, RELOC("off"), 3))
			RELOC(ppc64_iommu_off) = 1;
		else if (!strncmp(opt, RELOC("force"), 5))
			RELOC(iommu_force_on) = 1;
	}
#endif

	/* "mem=x" caps the usable memory at x bytes. */
	opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
	if (opt) {
		opt += 4;
		RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
		/* Align to 16 MB == size of ppc64 large page */
		RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
#endif
	}
}
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * To tell the firmware what our capabilities are, we have to pass
+ * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
+ * that contain structures that contain the actual values.
+ */
/* Fake 32-bit ELF image carrying two PT_NOTE sections (a CHRP note
 * and an RPA client-config note) that advertise our capabilities to
 * the firmware's elf-loader; see prom_send_capabilities(). */
static struct fake_elf {
	Elf32_Ehdr	elfhdr;
	Elf32_Phdr	phdr[2];
	struct chrpnote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[8];	/* "PowerPC" */
		struct chrpdesc {
			u32	real_mode;
			u32	real_base;
			u32	real_size;
			u32	virt_base;
			u32	virt_size;
			u32	load_base;
		} chrpdesc;
	} chrpnote;
	struct rpanote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[24];	/* "IBM,RPA-Client-Config" */
		struct rpadesc {
			u32	lpar_affinity;
			u32	min_rmo_size;
			u32	min_rmo_percent;
			u32	max_pft_size;
			u32	splpar;
			u32	min_load;
			u32	new_mem_def;
			u32	ignore_me;
		} rpadesc;
	} rpanote;
} fake_elf = {
	.elfhdr = {
		.e_ident = { 0x7f, 'E', 'L', 'F',
			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
		.e_type = ET_EXEC,	/* yeah right */
		.e_machine = EM_PPC,
		.e_version = EV_CURRENT,
		.e_phoff = offsetof(struct fake_elf, phdr),
		.e_phentsize = sizeof(Elf32_Phdr),
		.e_phnum = 2
	},
	/* Two program headers, one per embedded note. */
	.phdr = {
		[0] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, chrpnote),
			.p_filesz = sizeof(struct chrpnote)
		}, [1] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, rpanote),
			.p_filesz = sizeof(struct rpanote)
		}
	},
	.chrpnote = {
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
		.type = 0x1275,
		.name = "PowerPC",
		.chrpdesc = {
			.real_mode = ~0U,	/* ~0 means "don't care" */
			.real_base = ~0U,
			.real_size = ~0U,
			.virt_base = ~0U,
			.virt_size = ~0U,
			.load_base = ~0U
		},
	},
	.rpanote = {
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.type = 0x12759999,
		.name = "IBM,RPA-Client-Config",
		.rpadesc = {
			.lpar_affinity = 0,
			.min_rmo_size = 64,	/* in megabytes */
			.min_rmo_percent = 0,
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
			.splpar = 1,
			.min_load = ~0U,
			.new_mem_def = 0
		}
	}
};
+
/*
 * Hand the fake_elf image above to the firmware's elf-loader so it
 * can pick up our capability notes ("process-elf-header" method).
 * Failure to open the loader is non-fatal.
 */
static void __init prom_send_capabilities(void)
{
	ihandle elfloader;

	elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
	/* NOTE(review): "open" may also fail with PROM_ERROR (-1),
	 * which this 0-check does not catch -- confirm whether
	 * IHANDLE_VALID() should be used here. */
	if (elfloader == 0) {
		prom_printf("couldn't open /packages/elf-loader\n");
		return;
	}
	call_prom("call-method", 3, 1, ADDR("process-elf-header"),
			elfloader, ADDR(&fake_elf));
	call_prom("close", 1, 0, elfloader);
}
+#endif
+
+/*
+ * Memory allocation strategy... our layout is normally:
+ *
+ *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
+ *  rare cases, initrd might end up being before the kernel though.
+ *  We assume this won't overwrite the final kernel at 0, we have no
+ *  provision to handle that in this version, but it should hopefully
+ *  never happen.
+ *
+ *  alloc_top is set to the top of RMO, eventually shrink down if the
+ *  TCEs overlap
+ *
+ *  alloc_bottom is set to the top of kernel/initrd
+ *
+ *  from there, allocations are done this way : rtas is allocated
+ *  topmost, and the device-tree is allocated from the bottom. We try
+ *  to grow the device-tree allocation as we progress. If we can't,
+ *  then we fail, we don't currently have a facility to restart
+ *  elsewhere, but that shouldn't be necessary.
+ *
+ *  Note that calls to reserve_mem have to be done explicitly, memory
+ *  allocated with either alloc_up or alloc_down isn't automatically
+ *  reserved.
+ */
+
+
+/*
+ * Allocates memory in the RMO upward from the kernel/initrd
+ *
+ * When align is 0, this is a special case, it means to allocate in place
+ * at the current location of alloc_bottom or fail (that is basically
+ * extending the previous allocation). Used for the device-tree flattening
+ *
+ * Returns the claimed address, or 0 on failure.
+ */
+static unsigned long __init alloc_up(unsigned long size, unsigned long align)
+{
+	unsigned long base;
+	unsigned long addr = 0;
+
+	prom_debug("alloc_up(%x, %x)\n", size, align);
+	if (RELOC(ram_top) == 0)
+		prom_panic("alloc_up() called with mem not initialized\n");
+
+	/* The old initializer applied _ALIGN_UP unconditionally, which is
+	 * meaningless for align == 0; compute base in one place instead.
+	 */
+	if (align)
+		base = _ALIGN_UP(RELOC(alloc_bottom), align);
+	else
+		base = RELOC(alloc_bottom);
+
+	/* Walk upward in 1MB steps until the firmware accepts the claim or
+	 * we would pass alloc_top.  With align == 0 only the current bottom
+	 * is tried (extend-in-place semantics).
+	 */
+	for(; (base + size) <= RELOC(alloc_top);
+	    base = _ALIGN_UP(base + 0x100000, align)) {
+		prom_debug("    trying: 0x%x\n\r", base);
+		addr = (unsigned long)prom_claim(base, size, 0);
+		if (addr != PROM_ERROR)
+			break;
+		addr = 0;
+		if (align == 0)
+			break;
+	}
+	if (addr == 0)
+		return 0;
+	/* NOTE(review): alloc_bottom is set to the claimed base, not
+	 * base + size; callers appear to track the allocation end
+	 * themselves -- verify against the flattening code.
+	 */
+	RELOC(alloc_bottom) = addr;
+
+	prom_debug(" -> %x\n", addr);
+	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
+	prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
+	prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
+	prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
+	prom_debug("  ram_top      : %x\n", RELOC(ram_top));
+
+	return addr;
+}
+
+/*
+ * Allocates memory downward, either from top of RMO, or if highmem
+ * is set, from the top of RAM.  Note that this one doesn't handle
+ * failures.  It does claim memory if highmem is not set.
+ *
+ * Returns the allocated address, or 0 on failure.
+ */
+static unsigned long __init alloc_down(unsigned long size, unsigned long align,
+                                      int highmem)
+{
+       unsigned long base, addr = 0;
+
+       prom_debug("alloc_down(%x, %x, %s)\n", size, align,
+                  highmem ? RELOC("(high)") : RELOC("(low)"));
+       if (RELOC(ram_top) == 0)
+               prom_panic("alloc_down() called with mem not initialized\n");
+
+       if (highmem) {
+               /* Carve out storage for the TCE table. */
+               addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
+               if (addr <= RELOC(alloc_bottom))
+                       return 0;
+               /* Will we bump into the RMO ? If yes, check out that we
+                * didn't overlap existing allocations there, if we did,
+                * we are dead, we must be the first in town !
+                */
+               if (addr < RELOC(rmo_top)) {
+                       /* Good, we are first */
+                       if (RELOC(alloc_top) == RELOC(rmo_top))
+                               RELOC(alloc_top) = RELOC(rmo_top) = addr;
+                       else
+                               return 0;
+               }
+               /* Highmem carving only moves the bookkeeping top; no
+                * prom_claim is done on this path (see header comment).
+                */
+               RELOC(alloc_top_high) = addr;
+               goto bail;
+       }
+
+       /* Low allocation: walk down from alloc_top in 1MB steps until
+        * the firmware accepts the claim or we reach alloc_bottom.
+        */
+       base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
+       for (; base > RELOC(alloc_bottom);
+            base = _ALIGN_DOWN(base - 0x100000, align))  {
+               prom_debug("    trying: 0x%x\n\r", base);
+               addr = (unsigned long)prom_claim(base, size, 0);
+               if (addr != PROM_ERROR)
+                       break;
+               addr = 0;
+       }
+       if (addr == 0)
+               return 0;
+       RELOC(alloc_top) = addr;
+
+ bail:
+       prom_debug(" -> %x\n", addr);
+       prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
+       prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
+       prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
+       prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
+       prom_debug("  ram_top      : %x\n", RELOC(ram_top));
+
+       return addr;
+}
+
+/*
+ * Parse a "reg" cell
+ *
+ * Reads an s-cell value (32-bit cells) from *cellp and advances the
+ * pointer past all s cells.  Cells that do not fit in an unsigned long
+ * (1 cell on 32-bit, 2 cells on 64-bit) are skipped, keeping only the
+ * trailing, least-significant part of the value.
+ */
+static unsigned long __init prom_next_cell(int s, cell_t **cellp)
+{
+       cell_t *p = *cellp;
+       unsigned long r = 0;
+
+       /* Ignore more than 2 cells */
+       while (s > sizeof(unsigned long) / 4) {
+               p++;
+               s--;
+       }
+       r = *p++;
+#ifdef CONFIG_PPC64
+       /* Two cells remain: combine them into one 64-bit value. */
+       if (s > 1) {
+               r <<= 32;
+               r |= *(p++);
+       }
+#endif
+       *cellp = p;
+       return r;
+}
+
+/*
+ * Very dumb function for adding to the memory reserve list, but
+ * we don't need anything smarter at this point
+ *
+ * XXX Eventually check for collisions.  They should NEVER happen.
+ * If problems seem to show up, it would be a good start to track
+ * them down.
+ */
+static void __init reserve_mem(unsigned long base, unsigned long size)
+{
+	unsigned long top = base + size;
+	unsigned long cnt = RELOC(mem_reserve_cnt);
+
+	if (size == 0)
+		return;
+
+	/* We need to always keep one empty entry so that we
+	 * have our terminator with "size" set to 0 since we are
+	 * dumb and just copy this entire array to the boot params
+	 */
+	base = _ALIGN_DOWN(base, PAGE_SIZE);
+	top = _ALIGN_UP(top, PAGE_SIZE);
+	size = top - base;
+
+	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
+		prom_panic("Memory reserve map exhausted !\n");
+	RELOC(mem_reserve_map)[cnt].base = base;
+	RELOC(mem_reserve_map)[cnt].size = size;
+	RELOC(mem_reserve_cnt) = cnt + 1;
+}
+
+/*
+ * Initialize memory allocation mechanism, parse "memory" nodes and
+ * obtain that way the top of memory and RMO to set up our local allocator
+ */
+static void __init prom_init_mem(void)
+{
+       phandle node;
+       char *path, type[64];
+       unsigned int plen;
+       cell_t *p, *endp;
+       struct prom_t *_prom = &RELOC(prom);
+       u32 rac, rsc;
+
+       /*
+        * We iterate the memory nodes to find
+        * 1) top of RMO (first node)
+        * 2) top of memory
+        */
+       rac = 2;
+       prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
+       rsc = 1;
+       prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
+       prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
+       prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
+
+       prom_debug("scanning memory:\n");
+       path = RELOC(prom_scratch);
+
+       for (node = 0; prom_next_node(&node); ) {
+               type[0] = 0;
+               prom_getprop(node, "device_type", type, sizeof(type));
+
+               if (strcmp(type, RELOC("memory")))
+                       continue;
+       
+               plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
+               if (plen > sizeof(regbuf)) {
+                       prom_printf("memory node too large for buffer !\n");
+                       plen = sizeof(regbuf);
+               }
+               p = RELOC(regbuf);
+               endp = p + (plen / sizeof(cell_t));
+
+#ifdef DEBUG_PROM
+               memset(path, 0, PROM_SCRATCH_SIZE);
+               call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
+               prom_debug("  node %s :\n", path);
+#endif /* DEBUG_PROM */
+
+               /* Walk the (base, size) pairs: a range starting at 0
+                * gives the RMO size, and the highest end address seen
+                * becomes ram_top.
+                */
+               while ((endp - p) >= (rac + rsc)) {
+                       unsigned long base, size;
+
+                       base = prom_next_cell(rac, &p);
+                       size = prom_next_cell(rsc, &p);
+
+                       if (size == 0)
+                               continue;
+                       prom_debug("    %x %x\n", base, size);
+                       if (base == 0)
+                               RELOC(rmo_top) = size;
+                       if ((base + size) > RELOC(ram_top))
+                               RELOC(ram_top) = base + size;
+               }
+       }
+
+       /* Allocations start above the kernel image plus a small pad. */
+       RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
+
+       /* Check if we have an initrd after the kernel, if we do move our bottom
+        * point to after it
+        */
+       if (RELOC(prom_initrd_start)) {
+               if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
+                       RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
+       }
+
+       /*
+        * If prom_memory_limit is set we reduce the upper limits *except* for
+        * alloc_top_high. This must be the real top of RAM so we can put
+        * TCE's up there.
+        */
+
+       RELOC(alloc_top_high) = RELOC(ram_top);
+
+       if (RELOC(prom_memory_limit)) {
+               if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
+                       prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
+                               RELOC(prom_memory_limit));
+                       RELOC(prom_memory_limit) = 0;
+               } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
+                       prom_printf("Ignoring mem=%x >= ram_top.\n",
+                               RELOC(prom_memory_limit));
+                       RELOC(prom_memory_limit) = 0;
+               } else {
+                       RELOC(ram_top) = RELOC(prom_memory_limit);
+                       RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
+               }
+       }
+
+       /*
+        * Setup our top alloc point, that is top of RMO or top of
+        * segment 0 when running non-LPAR.
+        * Some RS64 machines have buggy firmware where claims up at
+        * 1GB fail.  Cap at 768MB as a workaround.
+        * Since 768MB is plenty of room, and we need to cap to something
+        * reasonable on 32-bit, cap at 768MB on all machines.
+        */
+       if (!RELOC(rmo_top))
+               RELOC(rmo_top) = RELOC(ram_top);
+       RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
+       RELOC(alloc_top) = RELOC(rmo_top);
+
+       prom_printf("memory layout at init:\n");
+       prom_printf("  memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
+       prom_printf("  alloc_bottom : %x\n", RELOC(alloc_bottom));
+       prom_printf("  alloc_top    : %x\n", RELOC(alloc_top));
+       prom_printf("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
+       prom_printf("  rmo_top      : %x\n", RELOC(rmo_top));
+       prom_printf("  ram_top      : %x\n", RELOC(ram_top));
+}
+
+
+/*
+ * Allocate room for and instantiate RTAS
+ *
+ * Claims rtas-size bytes below the RMO top, asks the firmware to
+ * instantiate RTAS there, reserves the region and records base/entry
+ * in the device tree for the kernel proper.  Silently returns if no
+ * /rtas node exists or any step fails.
+ */
+static void __init prom_instantiate_rtas(void)
+{
+	phandle rtas_node;
+	ihandle rtas_inst;
+	u32 base, entry = 0;
+	u32 size = 0;
+
+	prom_debug("prom_instantiate_rtas: start...\n");
+
+	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
+	prom_debug("rtas_node: %x\n", rtas_node);
+	if (!PHANDLE_VALID(rtas_node))
+		return;
+
+	prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
+	if (size == 0)
+		return;
+
+	base = alloc_down(size, PAGE_SIZE, 0);
+	if (base == 0) {
+		prom_printf("RTAS allocation failed !\n");
+		return;
+	}
+
+	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
+	if (!IHANDLE_VALID(rtas_inst)) {
+		/* Fix: message previously lacked its newline */
+		prom_printf("opening rtas package failed\n");
+		return;
+	}
+
+	prom_printf("instantiating rtas at 0x%x ...", base);
+
+	if (call_prom_ret("call-method", 3, 2, &entry,
+			  ADDR("instantiate-rtas"),
+			  rtas_inst, base) == PROM_ERROR
+	    || entry == 0) {
+		prom_printf(" failed\n");
+		return;
+	}
+	prom_printf(" done\n");
+
+	/* alloc_down does not reserve; do it explicitly */
+	reserve_mem(base, size);
+
+	prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
+	prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
+
+	prom_debug("rtas base     = 0x%x\n", base);
+	prom_debug("rtas entry    = 0x%x\n", entry);
+	prom_debug("rtas size     = 0x%x\n", (long)size);
+
+	prom_debug("prom_instantiate_rtas: end...\n");
+}
+
+#ifdef CONFIG_PPC64
+/*
+ * Allocate room for and initialize TCE tables
+ *
+ * Scans for PHB nodes (python/Speedwagon/Winnipeg bridges), allocates
+ * an identity-mapped TCE table for each in high memory, programs the
+ * hardware via the firmware's "set-64-bit-addressing" method, and
+ * reserves the whole region in one block.
+ */
+static void __init prom_initialize_tce_table(void)
+{
+	phandle node;
+	ihandle phb_node;
+	char compatible[64], type[64], model[64];
+	char *path = RELOC(prom_scratch);
+	u64 base, align;
+	u32 minalign, minsize;
+	u64 tce_entry, *tce_entryp;
+	u64 local_alloc_top, local_alloc_bottom;
+	u64 i;
+
+	if (RELOC(ppc64_iommu_off))
+		return;
+
+	prom_debug("starting prom_initialize_tce_table\n");
+
+	/* Cache current top of allocs so we reserve a single block */
+	local_alloc_top = RELOC(alloc_top_high);
+	local_alloc_bottom = local_alloc_top;
+
+	/* Search all nodes looking for PHBs. */
+	for (node = 0; prom_next_node(&node); ) {
+		compatible[0] = 0;
+		type[0] = 0;
+		model[0] = 0;
+		prom_getprop(node, "compatible",
+			     compatible, sizeof(compatible));
+		prom_getprop(node, "device_type", type, sizeof(type));
+		prom_getprop(node, "model", model, sizeof(model));
+
+		if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
+			continue;
+
+		/* Keep the old logic intact to avoid regression. */
+		if (compatible[0] != 0) {
+			if ((strstr(compatible, RELOC("python")) == NULL) &&
+			    (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
+			    (strstr(compatible, RELOC("Winnipeg")) == NULL))
+				continue;
+		} else if (model[0] != 0) {
+			if ((strstr(model, RELOC("ython")) == NULL) &&
+			    (strstr(model, RELOC("peedwagon")) == NULL) &&
+			    (strstr(model, RELOC("innipeg")) == NULL))
+				continue;
+		}
+
+		if (prom_getprop(node, "tce-table-minalign", &minalign,
+				 sizeof(minalign)) == PROM_ERROR)
+			minalign = 0;
+		if (prom_getprop(node, "tce-table-minsize", &minsize,
+				 sizeof(minsize)) == PROM_ERROR)
+			minsize = 4UL << 20;
+
+		/*
+		 * Even though we read what OF wants, we just set the table
+		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
+		 * By doing this, we avoid the pitfalls of trying to DMA to
+		 * MMIO space and the DMA alias hole.
+		 *
+		 * On POWER4, firmware sets the TCE region by assuming
+		 * each TCE table is 8MB. Using this memory for anything
+		 * else will impact performance, so we always allocate 8MB.
+		 * Anton
+		 */
+		if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+			minsize = 8UL << 20;
+		else
+			minsize = 4UL << 20;
+
+		/* Align to the greater of the align or size */
+		align = max(minalign, minsize);
+		base = alloc_down(minsize, align, 1);
+		if (base == 0)
+			prom_panic("ERROR, cannot find space for TCE table.\n");
+		if (base < local_alloc_bottom)
+			local_alloc_bottom = base;
+
+		/* Save away the TCE table attributes for later use. */
+		prom_setprop(node, "linux,tce-base", &base, sizeof(base));
+		prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
+
+		/* It seems OF doesn't null-terminate the path :-(
+		 * Fix: sizeof(path) was the size of the pointer (8 bytes),
+		 * not of the scratch buffer, leaving most of it unzeroed.
+		 */
+		memset(path, 0, PROM_SCRATCH_SIZE);
+		/* Call OF to setup the TCE hardware */
+		if (call_prom("package-to-path", 3, 1, node,
+			      path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
+			prom_printf("package-to-path failed\n");
+		}
+
+		prom_debug("TCE table: %s\n", path);
+		prom_debug("\tnode = 0x%x\n", node);
+		prom_debug("\tbase = 0x%x\n", base);
+		prom_debug("\tsize = 0x%x\n", minsize);
+
+		/* Initialize the table to have a one-to-one mapping
+		 * over the allocated size.  (Cast fixed to match the
+		 * declared u64 * type.)
+		 */
+		tce_entryp = (u64 *)base;
+		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
+			tce_entry = (i << PAGE_SHIFT);
+			tce_entry |= 0x3;
+			*tce_entryp = tce_entry;
+		}
+
+		prom_printf("opening PHB %s", path);
+		phb_node = call_prom("open", 1, 1, path);
+		if (phb_node == 0)
+			prom_printf("... failed\n");
+		else
+			prom_printf("... done\n");
+
+		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
+			  phb_node, -1, minsize,
+			  (u32) base, (u32) (base >> 32));
+		call_prom("close", 1, 0, phb_node);
+	}
+
+	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
+
+	if (RELOC(prom_memory_limit)) {
+		/*
+		 * We align the start to a 16MB boundary so we can map
+		 * the TCE area using large pages if possible.
+		 * The end should be the top of RAM so no need to align it.
+		 */
+		RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
+							  0x1000000);
+		RELOC(prom_tce_alloc_end) = local_alloc_top;
+	}
+
+	/* Flag the first invalid entry */
+	prom_debug("ending prom_initialize_tce_table\n");
+}
+#endif
+
+/*
+ * With CHRP SMP we need to use the OF to start the other processors.
+ * We can't wait until smp_boot_cpus (the OF is trashed by then)
+ * so we have to put the processors into a holding pattern controlled
+ * by the kernel (not OF) before we destroy the OF.
+ *
+ * This uses a chunk of low memory, puts some holding pattern
+ * code there and sends the other processors off to there until
+ * smp_boot_cpus tells them to do something.  The holding pattern
+ * checks that address until its cpu # is there, when it is that
+ * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
+ * of setting those values.
+ *
+ * We also use physical address 0x4 here to tell when a cpu
+ * is in its holding pattern code.
+ *
+ * -- Cort
+ */
+static void __init prom_hold_cpus(void)
+{
+#ifdef CONFIG_PPC64
+       unsigned long i;
+       unsigned int reg;
+       phandle node;
+       char type[64];
+       int cpuid = 0;
+       unsigned int interrupt_server[MAX_CPU_THREADS];
+       unsigned int cpu_threads, hw_cpu_num;
+       int propsize;
+       extern void __secondary_hold(void);
+       extern unsigned long __secondary_hold_spinloop;
+       extern unsigned long __secondary_hold_acknowledge;
+       unsigned long *spinloop
+               = (void *) __pa(&__secondary_hold_spinloop);
+       unsigned long *acknowledge
+               = (void *) __pa(&__secondary_hold_acknowledge);
+       /* NOTE(review): this inner #ifdef is nested inside the outer
+        * CONFIG_PPC64 guard above, so the #else branch below is
+        * currently dead code -- confirm whether the whole function
+        * is meant to become 32-bit capable.
+        */
+#ifdef CONFIG_PPC64
+       unsigned long secondary_hold
+               = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
+#else
+       unsigned long secondary_hold = __pa(&__secondary_hold);
+#endif
+       struct prom_t *_prom = &RELOC(prom);
+
+       prom_debug("prom_hold_cpus: start...\n");
+       prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
+       prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
+       prom_debug("    1) acknowledge    = 0x%x\n",
+                  (unsigned long)acknowledge);
+       prom_debug("    1) *acknowledge   = 0x%x\n", *acknowledge);
+       prom_debug("    1) secondary_hold = 0x%x\n", secondary_hold);
+
+       /* Set the common spinloop variable, so all of the secondary cpus
+        * will block when they are awakened from their OF spinloop.
+        * This must occur for both SMP and non SMP kernels, since OF will
+        * be trashed when we move the kernel.
+        */
+       *spinloop = 0;
+
+#ifdef CONFIG_HMT
+       for (i = 0; i < NR_CPUS; i++) {
+               RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
+       }
+#endif
+       /* look for cpus */
+       for (node = 0; prom_next_node(&node); ) {
+               type[0] = 0;
+               prom_getprop(node, "device_type", type, sizeof(type));
+               if (strcmp(type, RELOC("cpu")) != 0)
+                       continue;
+
+               /* Skip non-configured cpus. */
+               if (prom_getprop(node, "status", type, sizeof(type)) > 0)
+                       if (strcmp(type, RELOC("okay")) != 0)
+                               continue;
+
+               reg = -1;
+               prom_getprop(node, "reg", &reg, sizeof(reg));
+
+               prom_debug("\ncpuid        = 0x%x\n", cpuid);
+               prom_debug("cpu hw idx   = 0x%x\n", reg);
+
+               /* Init the acknowledge var which will be reset by
+                * the secondary cpu when it awakens from its OF
+                * spinloop.
+                */
+               *acknowledge = (unsigned long)-1;
+
+               propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
+                                       &interrupt_server,
+                                       sizeof(interrupt_server));
+               if (propsize < 0) {
+                       /* no property.  old hardware has no SMT */
+                       cpu_threads = 1;
+                       interrupt_server[0] = reg; /* fake it with phys id */
+               } else {
+                       /* We have a threaded processor */
+                       cpu_threads = propsize / sizeof(u32);
+                       if (cpu_threads > MAX_CPU_THREADS) {
+                               prom_printf("SMT: too many threads!\n"
+                                           "SMT: found %x, max is %x\n",
+                                           cpu_threads, MAX_CPU_THREADS);
+                               cpu_threads = 1; /* ToDo: panic? */
+                       }
+               }
+
+               hw_cpu_num = interrupt_server[0];
+               if (hw_cpu_num != _prom->cpu) {
+                       /* Primary Thread of non-boot cpu */
+                       prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
+                       call_prom("start-cpu", 3, 0, node,
+                                 secondary_hold, reg);
+
+                       /* Busy-wait for the secondary to write its hw id
+                        * into *acknowledge, with a bounded spin count.
+                        */
+                       for ( i = 0 ; (i < 100000000) && 
+                             (*acknowledge == ((unsigned long)-1)); i++ )
+                               mb();
+
+                       if (*acknowledge == reg) {
+                               prom_printf("done\n");
+                               /* We have to get every CPU out of OF,
+                                * even if we never start it. */
+                               if (cpuid >= NR_CPUS)
+                                       goto next;
+                       } else {
+                               prom_printf("failed: %x\n", *acknowledge);
+                       }
+               }
+#ifdef CONFIG_SMP
+               else
+                       prom_printf("%x : boot cpu     %x\n", cpuid, reg);
+#endif
+next:
+#ifdef CONFIG_SMP
+               /* Init paca for secondary threads.   They start later. */
+               /* NOTE(review): no paca initialization is visible here --
+                * the loop only advances cpuid past the secondary threads;
+                * the comment above looks stale.
+                */
+               for (i=1; i < cpu_threads; i++) {
+                       cpuid++;
+                       if (cpuid >= NR_CPUS)
+                               continue;
+               }
+#endif /* CONFIG_SMP */
+               cpuid++;
+       }
+#ifdef CONFIG_HMT
+       /* Only enable HMT on processors that provide support. */
+       if (__is_processor(PV_PULSAR) || 
+           __is_processor(PV_ICESTAR) ||
+           __is_processor(PV_SSTAR)) {
+               prom_printf("    starting secondary threads\n");
+
+               for (i = 0; i < NR_CPUS; i += 2) {
+                       if (!cpu_online(i))
+                               continue;
+
+                       if (i == 0) {
+                               unsigned long pir = mfspr(SPRN_PIR);
+                               if (__is_processor(PV_PULSAR)) {
+                                       RELOC(hmt_thread_data)[i].pir = 
+                                               pir & 0x1f;
+                               } else {
+                                       RELOC(hmt_thread_data)[i].pir = 
+                                               pir & 0x3ff;
+                               }
+                       }
+               }
+       } else {
+               prom_printf("Processor is not HMT capable\n");
+       }
+#endif
+
+       if (cpuid > NR_CPUS)
+               prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
+                           ") exceeded: ignoring extras\n");
+
+       prom_debug("prom_hold_cpus: end...\n");
+#endif
+}
+
+
+/*
+ * Record the client-interface entry point and look up the two phandles
+ * (/chosen and the device tree root) everything else depends on.
+ */
+static void __init prom_init_client_services(unsigned long pp)
+{
+	struct prom_t *p = &RELOC(prom);
+
+	/* Stash the prom entry point before making any other call */
+	RELOC(prom_entry) = pp;
+
+	/* Locate /chosen; nothing can be printed before stdout exists */
+	p->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
+	if (!PHANDLE_VALID(p->chosen))
+		prom_panic("cannot find chosen"); /* msg won't be printed :( */
+
+	/* Locate the device tree root */
+	p->root = call_prom("finddevice", 1, 1, ADDR("/"));
+	if (!PHANDLE_VALID(p->root))
+		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
+}
+
+/*
+ * Find the firmware stdout device, record its full OF path and package
+ * handle under /chosen, and flag it as the boot display if it is one.
+ */
+static void __init prom_init_stdout(void)
+{
+       struct prom_t *_prom = &RELOC(prom);
+       /* path aliases the global of_stdout_device buffer (256 bytes,
+        * per the memset below)
+        */
+       char *path = RELOC(of_stdout_device);
+       char type[16];
+       u32 val;
+
+       if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
+               prom_panic("cannot find stdout");
+
+       _prom->stdout = val;
+
+       /* Get the full OF pathname of the stdout device */
+       memset(path, 0, 256);
+       call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
+       /* val is reused: from here on it holds the stdout device's
+        * package (phandle), not its instance (ihandle)
+        */
+       val = call_prom("instance-to-package", 1, 1, _prom->stdout);
+       prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
+       prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
+       prom_setprop(_prom->chosen, "linux,stdout-path",
+                    RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
+
+       /* If it's a display, note it */
+       memset(type, 0, sizeof(type));
+       prom_getprop(val, "device_type", type, sizeof(type));
+       if (strcmp(type, RELOC("display")) == 0)
+               prom_setprop(val, "linux,boot-display", NULL, 0);
+}
+
+/* Release the firmware's stdin instance, if /chosen records one. */
+static void __init prom_close_stdin(void)
+{
+	struct prom_t *_prom = &RELOC(prom);
+	ihandle stdin_ih;
+
+	if (prom_getprop(_prom->chosen, "stdin", &stdin_ih,
+			 sizeof(stdin_ih)) <= 0)
+		return;
+	call_prom("close", 1, 0, stdin_ih);
+}
+
+static int __init prom_find_machine_type(void)
+{
+       struct prom_t *_prom = &RELOC(prom);
+       char compat[256];
+       int len, i = 0;
+       phandle rtas;
+
+       len = prom_getprop(_prom->root, "compatible",
+                          compat, sizeof(compat)-1);
+       if (len > 0) {
+               compat[len] = 0;
+               while (i < len) {
+                       char *p = &compat[i];
+                       int sl = strlen(p);
+                       if (sl == 0)
+                               break;
+                       if (strstr(p, RELOC("Power Macintosh")) ||
+                           strstr(p, RELOC("MacRISC4")))
+                               return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+                       if (strstr(p, RELOC("Momentum,Maple")))
+                               return PLATFORM_MAPLE;
+#endif
+                       i += sl + 1;
+               }
+       }
+#ifdef CONFIG_PPC64
+       /* Default to pSeries. We need to know if we are running LPAR */
+       rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
+       if (PHANDLE_VALID(rtas)) {
+               int x = prom_getproplen(rtas, "ibm,hypertas-functions");
+               if (x != PROM_ERROR) {
+                       prom_printf("Hypertas detected, assuming LPAR !\n");
+                       return PLATFORM_PSERIES_LPAR;
+               }
+       }
+       return PLATFORM_PSERIES;
+#else
+       return PLATFORM_CHRP;
+#endif
+}
+
+/* Set palette entry i on display instance ih via the OF "color!"
+ * method.  Returns the firmware's result (0 on success).
+ * NOTE(review): components are passed b, g, r -- presumably so the
+ * method pops them in r, g, b order; confirm against IEEE 1275.
+ */
+static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
+{
+       return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
+}
+
+/*
+ * If we have a display that we don't know how to drive,
+ * we will want to try to execute OF's open method for it
+ * later.  However, OF will probably fall over if we do that
+ * we've taken over the MMU.
+ * So we check whether we will need to open the display,
+ * and if so, open it now.
+ */
+static void __init prom_check_displays(void)
+{
+       char type[16], *path;
+       phandle node;
+       ihandle ih;
+       int i;
+
+       static unsigned char default_colors[] = {
+               0x00, 0x00, 0x00,
+               0x00, 0x00, 0xaa,
+               0x00, 0xaa, 0x00,
+               0x00, 0xaa, 0xaa,
+               0xaa, 0x00, 0x00,
+               0xaa, 0x00, 0xaa,
+               0xaa, 0xaa, 0x00,
+               0xaa, 0xaa, 0xaa,
+               0x55, 0x55, 0x55,
+               0x55, 0x55, 0xff,
+               0x55, 0xff, 0x55,
+               0x55, 0xff, 0xff,
+               0xff, 0x55, 0x55,
+               0xff, 0x55, 0xff,
+               0xff, 0xff, 0x55,
+               0xff, 0xff, 0xff
+       };
+       const unsigned char *clut;
+
+       prom_printf("Looking for displays\n");
+       for (node = 0; prom_next_node(&node); ) {
+               memset(type, 0, sizeof(type));
+               prom_getprop(node, "device_type", type, sizeof(type));
+               if (strcmp(type, RELOC("display")) != 0)
+                       continue;
+
+               /* It seems OF doesn't null-terminate the path :-( */
+               path = RELOC(prom_scratch);
+               memset(path, 0, PROM_SCRATCH_SIZE);
+
+               /*
+                * leave some room at the end of the path for appending extra
+                * arguments
+                */
+               if (call_prom("package-to-path", 3, 1, node, path,
+                             PROM_SCRATCH_SIZE-10) == PROM_ERROR)
+                       continue;
+               prom_printf("found display   : %s, opening ... ", path);
+               
+               ih = call_prom("open", 1, 1, path);
+               if (ih == 0) {
+                       prom_printf("failed\n");
+                       continue;
+               }
+
+               /* Success */
+               prom_printf("done\n");
+               prom_setprop(node, "linux,opened", NULL, 0);
+
+               /* Setup a usable color table when the appropriate
+                * method is available. Should update this to set-colors */
+               clut = RELOC(default_colors);
+               for (i = 0; i < 32; i++, clut += 3)
+                       if (prom_set_color(ih, i, clut[0], clut[1],
+                                          clut[2]) != 0)
+                               break;
+
+#ifdef CONFIG_LOGO_LINUX_CLUT224
+               clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
+               for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
+                       if (prom_set_color(ih, i + 32, clut[0], clut[1],
+                                          clut[2]) != 0)
+                               break;
+#endif /* CONFIG_LOGO_LINUX_CLUT224 */
+       }
+}
+
+
+/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
+static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
+                             unsigned long needed, unsigned long align)
+{
+       void *ret;
+
+       *mem_start = _ALIGN(*mem_start, align);
+       while ((*mem_start + needed) > *mem_end) {
+               unsigned long room, chunk;
+
+               prom_debug("Chunk exhausted, claiming more at %x...\n",
+                          RELOC(alloc_bottom));
+               room = RELOC(alloc_top) - RELOC(alloc_bottom);
+               if (room > DEVTREE_CHUNK_SIZE)
+                       room = DEVTREE_CHUNK_SIZE;
+               if (room < PAGE_SIZE)
+                       prom_panic("No memory for flatten_device_tree (no room)");
+               chunk = alloc_up(room, 0);
+               if (chunk == 0)
+                       prom_panic("No memory for flatten_device_tree (claim failed)");
+               *mem_end = RELOC(alloc_top);
+       }
+
+       ret = (void *)*mem_start;
+       *mem_start += needed;
+
+       return ret;
+}
+
+#define dt_push_token(token, mem_start, mem_end) \
+       do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
+
+static unsigned long __init dt_find_string(char *str)
+{
+       char *s, *os;
+
+       s = os = (char *)RELOC(dt_string_start);
+       s += 4;
+       while (s <  (char *)RELOC(dt_string_end)) {
+               if (strcmp(s, str) == 0)
+                       return s - os;
+               s += strlen(s) + 1;
+       }
+       return 0;
+}
+
+/*
+ * The Open Firmware 1275 specification states properties must be 31 bytes or
+ * less, however not all firmwares obey this. Make it 64 bytes to be safe.
+ */
+#define MAX_PROPERTY_NAME 64
+
+static void __init scan_dt_build_strings(phandle node,
+                                        unsigned long *mem_start,
+                                        unsigned long *mem_end)
+{
+       char *prev_name, *namep, *sstart;
+       unsigned long soff;
+       phandle child;
+
+       sstart =  (char *)RELOC(dt_string_start);
+
+       /* get and store all property names */
+       prev_name = RELOC("");
+       for (;;) {
+               /* 64 is max len of name including nul. */
+               namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
+               if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
+                       /* No more properties: unwind alloc */
+                       *mem_start = (unsigned long)namep;
+                       break;
+               }
+
+               /* skip "name" */
+               if (strcmp(namep, RELOC("name")) == 0) {
+                       *mem_start = (unsigned long)namep;
+                       prev_name = RELOC("name");
+                       continue;
+               }
+               /* get/create string entry */
+               soff = dt_find_string(namep);
+               if (soff != 0) {
+                       *mem_start = (unsigned long)namep;
+                       namep = sstart + soff;
+               } else {
+                       /* Trim off some if we can */
+                       *mem_start = (unsigned long)namep + strlen(namep) + 1;
+                       RELOC(dt_string_end) = *mem_start;
+               }
+               prev_name = namep;
+       }
+
+       /* do all our children */
+       child = call_prom("child", 1, 1, node);
+       while (child != 0) {
+               scan_dt_build_strings(child, mem_start, mem_end);
+               child = call_prom("peer", 1, 1, child);
+       }
+}
+
+static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
+                                       unsigned long *mem_end)
+{
+       phandle child;
+       char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
+       unsigned long soff;
+       unsigned char *valp;
+       static char pname[MAX_PROPERTY_NAME];
+       int l;
+
+       dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
+
+       /* get the node's full name */
+       namep = (char *)*mem_start;
+       l = call_prom("package-to-path", 3, 1, node,
+                     namep, *mem_end - *mem_start);
+       if (l >= 0) {
+               /* Didn't fit?  Get more room. */
+               if ((l+1) > (*mem_end - *mem_start)) {
+                       namep = make_room(mem_start, mem_end, l+1, 1);
+                       call_prom("package-to-path", 3, 1, node, namep, l);
+               }
+               namep[l] = '\0';
+
+               /* Fixup an Apple bug where they have bogus \0 chars in the
+                * middle of the path in some properties
+                */
+               for (p = namep, ep = namep + l; p < ep; p++)
+                       if (*p == '\0') {
+                               memmove(p, p+1, ep - p);
+                               ep--; l--; p--;
+                       }
+
+               /* now try to extract the unit name in that mess */
+               for (p = namep, lp = NULL; *p; p++)
+                       if (*p == '/')
+                               lp = p + 1;
+               if (lp != NULL)
+                       memmove(namep, lp, strlen(lp) + 1);
+               *mem_start = _ALIGN(((unsigned long) namep) +
+                                   strlen(namep) + 1, 4);
+       }
+
+       /* get it again for debugging */
+       path = RELOC(prom_scratch);
+       memset(path, 0, PROM_SCRATCH_SIZE);
+       call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
+
+       /* get and store all properties */
+       prev_name = RELOC("");
+       sstart = (char *)RELOC(dt_string_start);
+       for (;;) {
+               if (call_prom("nextprop", 3, 1, node, prev_name,
+                             RELOC(pname)) != 1)
+                       break;
+
+               /* skip "name" */
+               if (strcmp(RELOC(pname), RELOC("name")) == 0) {
+                       prev_name = RELOC("name");
+                       continue;
+               }
+
+               /* find string offset */
+               soff = dt_find_string(RELOC(pname));
+               if (soff == 0) {
+                       prom_printf("WARNING: Can't find string index for"
+                                   " <%s>, node %s\n", RELOC(pname), path);
+                       break;
+               }
+               prev_name = sstart + soff;
+
+               /* get length */
+               l = call_prom("getproplen", 2, 1, node, RELOC(pname));
+
+               /* sanity checks */
+               if (l == PROM_ERROR)
+                       continue;
+               if (l > MAX_PROPERTY_LENGTH) {
+                       prom_printf("WARNING: ignoring large property ");
+                       /* It seems OF doesn't null-terminate the path :-( */
+                       prom_printf("[%s] ", path);
+                       prom_printf("%s length 0x%x\n", RELOC(pname), l);
+                       continue;
+               }
+
+               /* push property head */
+               dt_push_token(OF_DT_PROP, mem_start, mem_end);
+               dt_push_token(l, mem_start, mem_end);
+               dt_push_token(soff, mem_start, mem_end);
+
+               /* push property content */
+               valp = make_room(mem_start, mem_end, l, 4);
+               call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
+               *mem_start = _ALIGN(*mem_start, 4);
+       }
+
+       /* Add a "linux,phandle" property. */
+       soff = dt_find_string(RELOC("linux,phandle"));
+       if (soff == 0)
+               prom_printf("WARNING: Can't find string index for"
+                           " <linux-phandle> node %s\n", path);
+       else {
+               dt_push_token(OF_DT_PROP, mem_start, mem_end);
+               dt_push_token(4, mem_start, mem_end);
+               dt_push_token(soff, mem_start, mem_end);
+               valp = make_room(mem_start, mem_end, 4, 4);
+               *(u32 *)valp = node;
+       }
+
+       /* do all our children */
+       child = call_prom("child", 1, 1, node);
+       while (child != 0) {
+               scan_dt_build_struct(child, mem_start, mem_end);
+               child = call_prom("peer", 1, 1, child);
+       }
+
+       dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
+}
+
+static void __init flatten_device_tree(void)
+{
+       phandle root;
+       unsigned long mem_start, mem_end, room;
+       struct boot_param_header *hdr;
+       struct prom_t *_prom = &RELOC(prom);
+       char *namep;
+       u64 *rsvmap;
+
+       /*
+        * Check how much room we have between alloc top & bottom (+/- a
+        * few pages), crop to 4Mb, as this is our "chunk" size
+        */
+       room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
+       if (room > DEVTREE_CHUNK_SIZE)
+               room = DEVTREE_CHUNK_SIZE;
+       prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
+
+       /* Now try to claim that */
+       mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
+       if (mem_start == 0)
+               prom_panic("Can't allocate initial device-tree chunk\n");
+       mem_end = RELOC(alloc_top);
+
+       /* Get root of tree */
+       root = call_prom("peer", 1, 1, (phandle)0);
+       if (root == (phandle)0)
+               prom_panic ("couldn't get device tree root\n");
+
+       /* Build header and make room for mem rsv map */ 
+       mem_start = _ALIGN(mem_start, 4);
+       hdr = make_room(&mem_start, &mem_end,
+                       sizeof(struct boot_param_header), 4);
+       RELOC(dt_header_start) = (unsigned long)hdr;
+       rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
+
+       /* Start of strings */
+       mem_start = PAGE_ALIGN(mem_start);
+       RELOC(dt_string_start) = mem_start;
+       mem_start += 4; /* hole */
+
+       /* Add "linux,phandle" in there, we'll need it */
+       namep = make_room(&mem_start, &mem_end, 16, 1);
+       strcpy(namep, RELOC("linux,phandle"));
+       mem_start = (unsigned long)namep + strlen(namep) + 1;
+
+       /* Build string array */
+       prom_printf("Building dt strings...\n"); 
+       scan_dt_build_strings(root, &mem_start, &mem_end);
+       RELOC(dt_string_end) = mem_start;
+
+       /* Build structure */
+       mem_start = PAGE_ALIGN(mem_start);
+       RELOC(dt_struct_start) = mem_start;
+       prom_printf("Building dt structure...\n"); 
+       scan_dt_build_struct(root, &mem_start, &mem_end);
+       dt_push_token(OF_DT_END, &mem_start, &mem_end);
+       RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
+
+       /* Finish header */
+       hdr->boot_cpuid_phys = _prom->cpu;
+       hdr->magic = OF_DT_HEADER;
+       hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
+       hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
+       hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
+       hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
+       hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
+       hdr->version = OF_DT_VERSION;
+       /* Version 16 is not backward compatible */
+       hdr->last_comp_version = 0x10;
+
+       /* Reserve the whole thing and copy the reserve map in, we
+        * also bump mem_reserve_cnt to cause further reservations to
+        * fail since it's too late.
+        */
+       reserve_mem(RELOC(dt_header_start), hdr->totalsize);
+       memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
+
+#ifdef DEBUG_PROM
+       {
+               int i;
+               prom_printf("reserved memory map:\n");
+               for (i = 0; i < RELOC(mem_reserve_cnt); i++)
+                       prom_printf("  %x - %x\n",
+                                   RELOC(mem_reserve_map)[i].base,
+                                   RELOC(mem_reserve_map)[i].size);
+       }
+#endif
+       RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
+
+       prom_printf("Device tree strings 0x%x -> 0x%x\n",
+                   RELOC(dt_string_start), RELOC(dt_string_end)); 
+       prom_printf("Device tree struct  0x%x -> 0x%x\n",
+                   RELOC(dt_struct_start), RELOC(dt_struct_end));
+
+}
+
+
+static void __init fixup_device_tree(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
+       phandle u3, i2c, mpic;
+       u32 u3_rev;
+       u32 interrupts[2];
+       u32 parent;
+
+       /* Some G5s have a missing interrupt definition, fix it up here */
+       u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
+       if (!PHANDLE_VALID(u3))
+               return;
+       i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
+       if (!PHANDLE_VALID(i2c))
+               return;
+       mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
+       if (!PHANDLE_VALID(mpic))
+               return;
+
+       /* check if proper rev of u3 */
+       if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
+           == PROM_ERROR)
+               return;
+       if (u3_rev != 0x35 && u3_rev != 0x37)
+               return;
+       /* does it need fixup ? */
+       if (prom_getproplen(i2c, "interrupts") > 0)
+               return;
+
+       prom_printf("fixing up bogus interrupts for u3 i2c...\n");
+
+       /* interrupt on this revision of u3 is number 0 and level */
+       interrupts[0] = 0;
+       interrupts[1] = 1;
+       prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
+       parent = (u32)mpic;
+       prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
+#endif
+}
+
+
+static void __init prom_find_boot_cpu(void)
+{
+               struct prom_t *_prom = &RELOC(prom);
+       u32 getprop_rval;
+       ihandle prom_cpu;
+       phandle cpu_pkg;
+
+       if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
+               prom_panic("cannot find boot cpu");
+
+       cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+
+       prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
+       _prom->cpu = getprop_rval;
+
+       prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
+}
+
+static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+               struct prom_t *_prom = &RELOC(prom);
+
+       if (r3 && r4 && r4 != 0xdeadbeef) {
+               unsigned long val;
+
+               RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+               RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
+
+               val = RELOC(prom_initrd_start);
+               prom_setprop(_prom->chosen, "linux,initrd-start", &val,
+                            sizeof(val));
+               val = RELOC(prom_initrd_end);
+               prom_setprop(_prom->chosen, "linux,initrd-end", &val,
+                            sizeof(val));
+
+               reserve_mem(RELOC(prom_initrd_start),
+                           RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
+
+               prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
+               prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
+       }
+#endif /* CONFIG_BLK_DEV_INITRD */
+}
+
+/*
+ * We enter here early on, when the Open Firmware prom is still
+ * handling exceptions and the MMU hash table for us.
+ */
+
+unsigned long __init prom_init(unsigned long r3, unsigned long r4,
+                              unsigned long pp,
+                              unsigned long r6, unsigned long r7)
+{      
+               struct prom_t *_prom;
+       extern char _stext[];
+       unsigned long hdr;
+       u32 getprop_rval;
+       unsigned long offset = reloc_offset();
+
+#ifdef CONFIG_PPC32
+       reloc_got2(offset);
+#endif
+
+       _prom = &RELOC(prom);
+
+       /*
+        * First zero the BSS
+        */
+       memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
+
+       /*
+        * Init interface to Open Firmware, get some node references,
+        * like /chosen
+        */
+       prom_init_client_services(pp);
+
+       /*
+        * Init prom stdout device
+        */
+       prom_init_stdout();
+
+       /*
+        * Check for an initrd
+        */
+       prom_check_initrd(r3, r4);
+
+       /*
+        * Get default machine type. At this point, we do not differentiate
+        * between pSeries SMP and pSeries LPAR
+        */
+       RELOC(of_platform) = prom_find_machine_type();
+       getprop_rval = RELOC(of_platform);
+       prom_setprop(_prom->chosen, "linux,platform",
+                    &getprop_rval, sizeof(getprop_rval));
+
+#ifdef CONFIG_PPC_PSERIES
+       /*
+        * On pSeries, inform the firmware about our capabilities
+        */
+       if (RELOC(of_platform) & PLATFORM_PSERIES)
+               prom_send_capabilities();
+#endif
+
+       /*
+        * On pSeries and BPA, copy the CPU hold code
+        */
+               if (RELOC(of_platform) != PLATFORM_POWERMAC)
+                       copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
+
+       /*
+        * Do early parsing of command line
+        */
+       early_cmdline_parse();
+
+       /*
+        * Initialize memory management within prom_init
+        */
+       prom_init_mem();
+
+       /*
+        * Determine which cpu is actually running right _now_
+        */
+       prom_find_boot_cpu();
+
+       /* 
+        * Initialize display devices
+        */
+       prom_check_displays();
+
+#ifdef CONFIG_PPC64
+       /*
+        * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
+        * that uses the allocator, we need to make sure we get the top of memory
+        * available for us here...
+        */
+       if (RELOC(of_platform) == PLATFORM_PSERIES)
+               prom_initialize_tce_table();
+#endif
+
+       /*
+        * On non-powermacs, try to instantiate RTAS and put all CPUs
+        * in spin-loops. PowerMacs don't have a working RTAS and use
+        * a different way to spin CPUs
+        */
+       if (RELOC(of_platform) != PLATFORM_POWERMAC) {
+               prom_instantiate_rtas();
+               prom_hold_cpus();
+       }
+
+       /*
+        * Fill in some infos for use by the kernel later on
+        */
+       if (RELOC(prom_memory_limit))
+               prom_setprop(_prom->chosen, "linux,memory-limit",
+                            &RELOC(prom_memory_limit),
+                            sizeof(prom_memory_limit));
+#ifdef CONFIG_PPC64
+       if (RELOC(ppc64_iommu_off))
+               prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
+
+       if (RELOC(iommu_force_on))
+               prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
+
+       if (RELOC(prom_tce_alloc_start)) {
+               prom_setprop(_prom->chosen, "linux,tce-alloc-start",
+                            &RELOC(prom_tce_alloc_start),
+                            sizeof(prom_tce_alloc_start));
+               prom_setprop(_prom->chosen, "linux,tce-alloc-end",
+                            &RELOC(prom_tce_alloc_end),
+                            sizeof(prom_tce_alloc_end));
+       }
+#endif
+
+       /*
+        * Fixup any known bugs in the device-tree
+        */
+       fixup_device_tree();
+
+       /*
+        * Now finally create the flattened device-tree
+        */
+       prom_printf("copying OF device tree ...\n");
+       flatten_device_tree();
+
+       /* in case stdin is USB and still active on IBM machines... */
+       prom_close_stdin();
+
+       /*
+        * Call OF "quiesce" method to shut down pending DMA's from
+        * devices etc...
+        */
+       prom_printf("Calling quiesce ...\n");
+       call_prom("quiesce", 0, 0);
+
+       /*
+        * And finally, call the kernel passing it the flattened device
+        * tree and NULL as r5, thus triggering the new entry point which
+        * is common to us and kexec
+        */
+       hdr = RELOC(dt_header_start);
+       prom_printf("returning from prom_init\n");
+       prom_debug("->dt_header_start=0x%x\n", hdr);
+
+#ifdef CONFIG_PPC32
+       reloc_got2(-offset);
+#endif
+
+       __start(hdr, KERNELBASE + offset, 0);
+
+       return 0;
+}
similarity index 78%
rename from arch/ppc/kernel/ptrace.c
rename to arch/powerpc/kernel/ptrace.c
index e7aee41..943425a 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  arch/ppc/kernel/ptrace.c
- *
  *  PowerPC version
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
  *
  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@linuxcare.com.au).
+ * and Paul Mackerras (paulus@samba.org).
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License.  See the file README.legal in the main directory of
  * this archive for more details.
  */
 
+#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
 #include <linux/seccomp.h>
 #include <linux/audit.h>
+#ifdef CONFIG_PPC32
 #include <linux/module.h>
+#endif
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#ifdef CONFIG_PPC64
+#include <asm/ptrace-common.h>
+#endif
 
+#ifdef CONFIG_PPC32
 /*
  * Set of msr bits that gdb can change on behalf of a process.
  */
 #else
 #define MSR_DEBUGCHANGE        (MSR_SE | MSR_BE)
 #endif
+#endif /* CONFIG_PPC32 */
 
 /*
  * does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
  */
 
+#ifdef CONFIG_PPC32
 /*
  * Get contents of register REGNO in task TASK.
  */
@@ -228,6 +235,7 @@ clear_single_step(struct task_struct *task)
 #endif
        }
 }
+#endif /* CONFIG_PPC32 */
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -296,25 +304,28 @@ int sys_ptrace(long request, long pid, long addr, long data)
        }
 
        /* read the word at location addr in the USER area. */
-       /* XXX this will need fixing for 64-bit */
        case PTRACE_PEEKUSR: {
                unsigned long index, tmp;
 
                ret = -EIO;
                /* convert to index and check */
+#ifdef CONFIG_PPC32
                index = (unsigned long) addr >> 2;
-               if ((addr & 3) || index > PT_FPSCR
-                   || child->thread.regs == NULL)
+               if ((addr & 3) || (index > PT_FPSCR)
+                   || (child->thread.regs == NULL))
+#else
+               index = (unsigned long) addr >> 3;
+               if ((addr & 7) || (index > PT_FPSCR))
+#endif
                        break;
 
+#ifdef CONFIG_PPC32
                CHECK_FULL_REGS(child->thread.regs);
+#endif
                if (index < PT_FPR0) {
                        tmp = get_reg(child, (int) index);
                } else {
-                       preempt_disable();
-                       if (child->thread.regs->msr & MSR_FP)
-                               giveup_fpu(child);
-                       preempt_enable();
+                       flush_fp_to_thread(child);
                        tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
                }
                ret = put_user(tmp,(unsigned long __user *) data);
@@ -325,7 +336,8 @@ int sys_ptrace(long request, long pid, long addr, long data)
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = 0;
-               if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+               if (access_process_vm(child, addr, &data, sizeof(data), 1)
+                               == sizeof(data))
                        break;
                ret = -EIO;
                break;
@@ -336,21 +348,25 @@ int sys_ptrace(long request, long pid, long addr, long data)
 
                ret = -EIO;
                /* convert to index and check */
+#ifdef CONFIG_PPC32
                index = (unsigned long) addr >> 2;
-               if ((addr & 3) || index > PT_FPSCR
-                   || child->thread.regs == NULL)
+               if ((addr & 3) || (index > PT_FPSCR)
+                   || (child->thread.regs == NULL))
+#else
+               index = (unsigned long) addr >> 3;
+               if ((addr & 7) || (index > PT_FPSCR))
+#endif
                        break;
 
+#ifdef CONFIG_PPC32
                CHECK_FULL_REGS(child->thread.regs);
+#endif
                if (index == PT_ORIG_R3)
                        break;
                if (index < PT_FPR0) {
                        ret = put_reg(child, index, data);
                } else {
-                       preempt_disable();
-                       if (child->thread.regs->msr & MSR_FP)
-                               giveup_fpu(child);
-                       preempt_enable();
+                       flush_fp_to_thread(child);
                        ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
                        ret = 0;
                }
@@ -362,11 +378,10 @@ int sys_ptrace(long request, long pid, long addr, long data)
                ret = -EIO;
                if (!valid_signal(data))
                        break;
-               if (request == PTRACE_SYSCALL) {
+               if (request == PTRACE_SYSCALL)
                        set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               } else {
+               else
                        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               }
                child->exit_code = data;
                /* make sure the single step bit is not set. */
                clear_single_step(child);
@@ -404,28 +419,102 @@ int sys_ptrace(long request, long pid, long addr, long data)
                break;
        }
 
+#ifdef CONFIG_PPC64
+       case PTRACE_GET_DEBUGREG: {
+               ret = -EINVAL;
+               /* We only support one DABR and no IABRS at the moment */
+               if (addr > 0)
+                       break;
+               ret = put_user(child->thread.dabr,
+                              (unsigned long __user *)data);
+               break;
+       }
+
+       case PTRACE_SET_DEBUGREG:
+               ret = ptrace_set_debugreg(child, addr, data);
+               break;
+#endif
+
        case PTRACE_DETACH:
                ret = ptrace_detach(child, data);
                break;
 
+#ifdef CONFIG_PPC64
+       case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
+               int i;
+               unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
+               unsigned long __user *tmp = (unsigned long __user *)addr;
+
+               for (i = 0; i < 32; i++) {
+                       ret = put_user(*reg, tmp);
+                       if (ret)
+                               break;
+                       reg++;
+                       tmp++;
+               }
+               break;
+       }
+
+       case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
+               int i;
+               unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
+               unsigned long __user *tmp = (unsigned long __user *)addr;
+
+               for (i = 0; i < 32; i++) {
+                       ret = get_user(*reg, tmp);
+                       if (ret)
+                               break;
+                       reg++;
+                       tmp++;
+               }
+               break;
+       }
+
+       case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
+               int i;
+               unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
+               unsigned long __user *tmp = (unsigned long __user *)addr;
+
+               flush_fp_to_thread(child);
+
+               for (i = 0; i < 32; i++) {
+                       ret = put_user(*reg, tmp);
+                       if (ret)
+                               break;
+                       reg++;
+                       tmp++;
+               }
+               break;
+       }
+
+       case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
+               int i;
+               unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
+               unsigned long __user *tmp = (unsigned long __user *)addr;
+
+               flush_fp_to_thread(child);
+
+               for (i = 0; i < 32; i++) {
+                       ret = get_user(*reg, tmp);
+                       if (ret)
+                               break;
+                       reg++;
+                       tmp++;
+               }
+               break;
+       }
+#endif /* CONFIG_PPC64 */
+
 #ifdef CONFIG_ALTIVEC
        case PTRACE_GETVRREGS:
                /* Get the child altivec register state. */
-               preempt_disable();
-               if (child->thread.regs->msr & MSR_VEC)
-                       giveup_altivec(child);
-               preempt_enable();
+               flush_altivec_to_thread(child);
                ret = get_vrregs((unsigned long __user *)data, child);
                break;
 
        case PTRACE_SETVRREGS:
                /* Set the child altivec register state. */
-               /* this is to clear the MSR_VEC bit to force a reload
-                * of register state from memory */
-               preempt_disable();
-               if (child->thread.regs->msr & MSR_VEC)
-                       giveup_altivec(child);
-               preempt_enable();
+               flush_altivec_to_thread(child);
                ret = set_vrregs(child, (unsigned long __user *)data);
                break;
 #endif
@@ -478,12 +567,21 @@ static void do_syscall_trace(void)
 
 void do_syscall_trace_enter(struct pt_regs *regs)
 {
+#ifdef CONFIG_PPC64
+       secure_computing(regs->gpr[0]);
+#endif
+
        if (test_thread_flag(TIF_SYSCALL_TRACE)
            && (current->ptrace & PT_PTRACED))
                do_syscall_trace();
 
        if (unlikely(current->audit_context))
-               audit_syscall_entry(current, AUDIT_ARCH_PPC,
+               audit_syscall_entry(current,
+#ifdef CONFIG_PPC32
+                                   AUDIT_ARCH_PPC,
+#else
+                                   test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
+#endif
                                    regs->gpr[0],
                                    regs->gpr[3], regs->gpr[4],
                                    regs->gpr[5], regs->gpr[6]);
@@ -491,17 +589,25 @@ void do_syscall_trace_enter(struct pt_regs *regs)
 
 void do_syscall_trace_leave(struct pt_regs *regs)
 {
+#ifdef CONFIG_PPC32
        secure_computing(regs->gpr[0]);
+#endif
 
        if (unlikely(current->audit_context))
                audit_syscall_exit(current,
                                   (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
                                   regs->result);
 
-       if ((test_thread_flag(TIF_SYSCALL_TRACE))
+       if ((test_thread_flag(TIF_SYSCALL_TRACE)
+#ifdef CONFIG_PPC64
+            || test_thread_flag(TIF_SINGLESTEP)
+#endif
+            )
            && (current->ptrace & PT_PTRACED))
                do_syscall_trace();
 }
 
+#ifdef CONFIG_PPC32
 EXPORT_SYMBOL(do_syscall_trace_enter);
 EXPORT_SYMBOL(do_syscall_trace_leave);
+#endif
similarity index 97%
rename from arch/ppc64/kernel/ptrace32.c
rename to arch/powerpc/kernel/ptrace32.c
index fb8c22d..5443682 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/ppc64/kernel/ptrace32.c
+ * ptrace for 32-bit processes running on a 64-bit kernel.
  *
  *  PowerPC version
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
  *
  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@linuxcare.com.au).
+ * and Paul Mackerras (paulus@samba.org).
  *
  * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file README.legal in the main directory of
+ * Public License.  See the file COPYING in the main directory of
  * this archive for more details.
  */
 
@@ -40,7 +40,8 @@
  * in exit.c or in signal.c.
  */
 
-int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
+int compat_sys_ptrace(int request, int pid, unsigned long addr,
+                     unsigned long data)
 {
        struct task_struct *child;
        int ret = -EPERM;
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
new file mode 100644 (file)
index 0000000..2f8c3c9
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * PowerPC-specific semaphore code.
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
+ * to eliminate the SMP races in the old version between the updates
+ * of `count' and `waking'.  Now we use negative `count' values to
+ * indicate that some process(es) are waiting for the semaphore.
+ */
+
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ *     old_count = sem->count;
+ *     tmp = MAX(old_count, 0) + incr;
+ *     sem->count = tmp;
+ *     return old_count;
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+       int old_count, tmp;
+
+       /* lwarx/stwcx. retry loop: srawi produces all-ones for a negative
+        * count and zero otherwise, so andc clamps the fetched value at 0
+        * (the MAX above) before incr is added.  stwcx. fails and branches
+        * back to 1: if another cpu updated count in between.
+        * PPC405_ERR77 expands to an erratum workaround on PPC405 builds
+        * (see its definition) and to nothing elsewhere. */
+       __asm__ __volatile__("\n"
+"1:    lwarx   %0,0,%3\n"
+"      srawi   %1,%0,31\n"
+"      andc    %1,%0,%1\n"
+"      add     %1,%1,%4\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%3\n"
+"      bne     1b"
+       : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+       : "r" (&sem->count), "r" (incr), "m" (sem->count)
+       : "cc");
+
+       return old_count;
+}
+
+/* Slow path of up(): make the semaphore available and wake a waiter. */
+void __up(struct semaphore *sem)
+{
+       /*
+        * Note that we incremented count in up() before we came here,
+        * but that was ineffective since the result was <= 0, and
+        * any negative value of count is equivalent to 0.
+        * This ends up setting count to 1, unless count is now > 0
+        * (i.e. because some other cpu has called up() in the meantime),
+        * in which case we just increment count.
+        */
+       __sem_update_count(sem, 1);
+       wake_up(&sem->wait);
+}
+EXPORT_SYMBOL(__up);
+
+/*
+ * Note that when we come in to __down or __down_interruptible,
+ * we have already decremented count, but that decrement was
+ * ineffective since the result was < 0, and any negative value
+ * of count is equivalent to 0.
+ * Thus it is only when we decrement count from some value > 0
+ * that we have actually got the semaphore.
+ */
+/* Slow path of down(): uninterruptible sleep until the semaphore is
+ * acquired (see the comment above on count semantics). */
+void __sched __down(struct semaphore *sem)
+{
+       struct task_struct *tsk = current;
+       DECLARE_WAITQUEUE(wait, tsk);
+
+       __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+       add_wait_queue_exclusive(&sem->wait, &wait);
+
+       /*
+        * Try to get the semaphore.  If the count is > 0, then we've
+        * got the semaphore; we decrement count and exit the loop.
+        * If the count is 0 or negative, we set it to -1, indicating
+        * that we are asleep, and then sleep.
+        */
+       while (__sem_update_count(sem, -1) <= 0) {
+               schedule();
+               /* re-arm the state before re-testing; a wake-up between
+                * the test and schedule() would otherwise be lost */
+               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+       }
+       remove_wait_queue(&sem->wait, &wait);
+       __set_task_state(tsk, TASK_RUNNING);
+
+       /*
+        * If there are any more sleepers, wake one of them up so
+        * that it can either get the semaphore, or set count to -1
+        * indicating that there are still processes sleeping.
+        */
+       wake_up(&sem->wait);
+}
+EXPORT_SYMBOL(__down);
+
+/*
+ * Like __down(), but a pending signal aborts the wait.
+ * Returns 0 once the semaphore is acquired, -EINTR on signal.
+ */
+int __sched __down_interruptible(struct semaphore * sem)
+{
+       int retval = 0;
+       struct task_struct *tsk = current;
+       DECLARE_WAITQUEUE(wait, tsk);
+
+       __set_task_state(tsk, TASK_INTERRUPTIBLE);
+       add_wait_queue_exclusive(&sem->wait, &wait);
+
+       while (__sem_update_count(sem, -1) <= 0) {
+               if (signal_pending(current)) {
+                       /*
+                        * A signal is pending - give up trying.
+                        * Set sem->count to 0 if it is negative,
+                        * since we are no longer sleeping.
+                        */
+                       __sem_update_count(sem, 0);
+                       retval = -EINTR;
+                       break;
+               }
+               schedule();
+               set_task_state(tsk, TASK_INTERRUPTIBLE);
+       }
+       remove_wait_queue(&sem->wait, &wait);
+       __set_task_state(tsk, TASK_RUNNING);
+
+       /* let the next sleeper (if any) take its turn */
+       wake_up(&sem->wait);
+       return retval;
+}
+EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
new file mode 100644 (file)
index 0000000..2d7fdeb
--- /dev/null
@@ -0,0 +1,652 @@
+/*
+ * Common prep/pmac/chrp boot and setup code.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/ide.h>
+#include <linux/tty.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/cpu.h>
+#include <linux/console.h>
+
+#include <asm/residual.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/amigappc.h>
+#include <asm/smp.h>
+#include <asm/elf.h>
+#include <asm/cputable.h>
+#include <asm/bootx.h>
+#include <asm/btext.h>
+#include <asm/machdep.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/pmac_feature.h>
+#include <asm/sections.h>
+#include <asm/nvram.h>
+#include <asm/xmon.h>
+#include <asm/ocp.h>
+
+#define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \
+                     defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
+                     defined(CONFIG_PPC_MPC52xx))
+
+#if USES_PPC_SYS
+#include <asm/ppc_sys.h>
+#endif
+
+#if defined CONFIG_KGDB
+#include <asm/kgdb.h>
+#endif
+
+extern void platform_init(void);
+extern void bootx_init(unsigned long r4, unsigned long phys);
+
+extern void ppc6xx_idle(void);
+extern void power4_idle(void);
+
+boot_infos_t *boot_infos;
+struct ide_machdep_calls ppc_ide_md;
+
+/* Used with the BI_MEMSIZE bootinfo parameter to store the memory
+   size value reported by the boot loader. */
+unsigned long boot_mem_size;
+
+unsigned long ISA_DMA_THRESHOLD;
+unsigned int DMA_MODE_READ;
+unsigned int DMA_MODE_WRITE;
+
+int have_of = 1;
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+int _machine = 0;
+
+extern void prep_init(void);
+extern void pmac_init(void);
+extern void chrp_init(void);
+
+dev_t boot_dev;
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_MAGIC_SYSRQ
+unsigned long SYSRQ_KEY = 0x54;
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_VGA_CONSOLE
+unsigned long vgacon_remap_base;
+#endif
+
+struct machdep_calls ppc_md;
+EXPORT_SYMBOL(ppc_md);
+
+/*
+ * These are used in binfmt_elf.c to put aux entries on the stack
+ * for each elf executable being started.
+ */
+int dcache_bsize;
+int icache_bsize;
+int ucache_bsize;
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \
+    defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA)
+struct screen_info screen_info = {
+       0, 25,                  /* orig-x, orig-y */
+       0,                      /* unused */
+       0,                      /* orig-video-page */
+       0,                      /* orig-video-mode */
+       80,                     /* orig-video-cols */
+       0,0,0,                  /* ega_ax, ega_bx, ega_cx */
+       25,                     /* orig-video-lines */
+       1,                      /* orig-video-isVGA */
+       16                      /* orig-video-points */
+};
+#endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */
+
+void machine_restart(char *cmd)
+{
+       /* flush pending NVRAM writes before the platform reset hook runs */
+#ifdef CONFIG_NVRAM
+       nvram_sync();
+#endif
+       ppc_md.restart(cmd);
+}
+
+void machine_power_off(void)
+{
+       /* flush pending NVRAM writes, then ask the platform to power down */
+#ifdef CONFIG_NVRAM
+       nvram_sync();
+#endif
+       ppc_md.power_off();
+}
+
+void machine_halt(void)
+{
+       /* flush pending NVRAM writes, then hand off to the platform halt hook */
+#ifdef CONFIG_NVRAM
+       nvram_sync();
+#endif
+       ppc_md.halt();
+}
+
+void (*pm_power_off)(void) = machine_power_off;
+
+#ifdef CONFIG_TAU
+extern u32 cpu_temp(unsigned long cpu);
+extern u32 cpu_temp_both(unsigned long cpu);
+#endif /* CONFIG_TAU */
+
+/*
+ * seq_file ->show callback for /proc/cpuinfo.
+ * v encodes the cpu index + 1 (see c_start()); the extra position
+ * past NR_CPUS produces the summary/footer entry.
+ */
+int show_cpuinfo(struct seq_file *m, void *v)
+{
+       int i = (int) v - 1;
+       unsigned int pvr;
+       unsigned short maj, min;
+       unsigned long lpj;
+
+       if (i >= NR_CPUS) {
+               /* Show summary information */
+#ifdef CONFIG_SMP
+               unsigned long bogosum = 0;
+               for (i = 0; i < NR_CPUS; ++i)
+                       if (cpu_online(i))
+                               bogosum += cpu_data[i].loops_per_jiffy;
+               seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+                          bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
+#endif /* CONFIG_SMP */
+
+               if (ppc_md.show_cpuinfo != NULL)
+                       ppc_md.show_cpuinfo(m);
+               return 0;
+       }
+
+#ifdef CONFIG_SMP
+       if (!cpu_online(i))
+               return 0;
+       pvr = cpu_data[i].pvr;
+       lpj = cpu_data[i].loops_per_jiffy;
+#else
+       /* UP: read the processor version register directly */
+       pvr = mfspr(SPRN_PVR);
+       lpj = loops_per_jiffy;
+#endif
+
+       seq_printf(m, "processor\t: %d\n", i);
+       seq_printf(m, "cpu\t\t: ");
+
+       if (cur_cpu_spec->pvr_mask)
+               seq_printf(m, "%s", cur_cpu_spec->cpu_name)
;
+       else
+               seq_printf(m, "unknown (%08x)", pvr);
+#ifdef CONFIG_ALTIVEC
+       if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
+               seq_printf(m, ", altivec supported");
+#endif
+       seq_printf(m, "\n");
+
+#ifdef CONFIG_TAU
+       if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
+#ifdef CONFIG_TAU_AVERAGE
+               /* more straightforward, but potentially misleading */
+               seq_printf(m,  "temperature \t: %u C (uncalibrated)\n",
+                          cpu_temp(i));
+#else
+               /* show the actual temp sensor range */
+               u32 temp;
+               temp = cpu_temp_both(i);
+               seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
+                          temp & 0xff, temp >> 16);
+#endif
+       }
+#endif /* CONFIG_TAU */
+
+       if (ppc_md.show_percpuinfo != NULL)
+               ppc_md.show_percpuinfo(m, i);
+
+       /* If we are a Freescale core do a simple check so
+        * we dont have to keep adding cases in the future */
+       if (PVR_VER(pvr) & 0x8000) {
+               maj = PVR_MAJ(pvr);
+               min = PVR_MIN(pvr);
+       } else {
+               /* per-family quirks for decoding major/minor revision */
+               switch (PVR_VER(pvr)) {
+                       case 0x0020:    /* 403 family */
+                               maj = PVR_MAJ(pvr) + 1;
+                               min = PVR_MIN(pvr);
+                               break;
+                       case 0x1008:    /* 740P/750P ?? */
+                               maj = ((pvr >> 8) & 0xFF) - 1;
+                               min = pvr & 0xFF;
+                               break;
+                       default:
+                               maj = (pvr >> 8) & 0xFF;
+                               min = pvr & 0xFF;
+                               break;
+               }
+       }
+
+       seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
+                  maj, min, PVR_VER(pvr), PVR_REV(pvr));
+
+       seq_printf(m, "bogomips\t: %lu.%02lu\n",
+                  lpj / (500000/HZ), (lpj / (5000/HZ)) % 100);
+
+#if USES_PPC_SYS
+       if (cur_ppc_sys_spec->ppc_sys_name)
+               seq_printf(m, "chipset\t\t: %s\n",
+                       cur_ppc_sys_spec->ppc_sys_name);
+#endif
+
+#ifdef CONFIG_SMP
+       seq_printf(m, "\n");
+#endif
+
+       return 0;
+}
+
+/*
+ * seq_file ->start: positions 0..NR_CPUS-1 map to per-cpu entries,
+ * position NR_CPUS yields the summary entry (handled by show_cpuinfo()
+ * when i >= NR_CPUS) -- hence the deliberate <= below.  The index is
+ * encoded as i + 1 so a valid position never returns NULL.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       int i = *pos;
+
+       return i <= NR_CPUS? (void *) (i + 1): NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       /* advance the position and re-validate through c_start() */
+       ++*pos;
+       return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+       /* nothing to release */
+}
+
+/* seq_file iterator hooks backing /proc/cpuinfo */
+struct seq_operations cpuinfo_op = {
+       .start = c_start,
+       .next = c_next,
+       .stop = c_stop,
+       .show = show_cpuinfo,
+};
+
+/*
+ * We're called here very early in the boot.  We determine the machine
+ * type and call the appropriate low-level setup functions.
+ *  -- Cort <cort@fsmlabs.com>
+ *
+ * Note that the kernel may be running at an address which is different
+ * from the address that it was linked at, so we must use RELOC/PTRRELOC
+ * to access static data (including strings).  -- paulus
+ */
+unsigned long __init early_init(unsigned long dt_ptr)
+{
+       unsigned long offset = reloc_offset();
+
+       /* NOTE(review): dt_ptr is not used in this function; presumably the
+        * caller keeps it and passes it on to machine_init() later -- the
+        * asm caller is outside this file, so confirm there. */
+
+       /* First zero the BSS -- use memset_io, some platforms don't have
+        * caches on yet */
+       memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);
+
+       /*
+        * Identify the CPU type and fix up code sections
+        * that depend on which cpu we have.
+        */
+       identify_cpu(offset, 0);
+       do_cpu_ftr_fixups(offset);
+
+       /* KERNELBASE adjusted by the run-time relocation offset */
+       return KERNELBASE + offset;
+}
+
+#ifdef CONFIG_PPC_OF
+/*
+ * Assume here that all clock rates are the same in a
+ * smp system.  -- Cort
+ */
+/*
+ * Per-cpu line for /proc/cpuinfo on OF machines: print the clock
+ * frequency of cpu i taken from its "clock-frequency" property.
+ */
+int
+of_show_percpuinfo(struct seq_file *m, int i)
+{
+       struct device_node *cpu_node;
+       u32 *fp;
+       int s;
+       
+       cpu_node = find_type_devices("cpu");
+       if (!cpu_node)
+               return 0;
+       /* walk to the i-th cpu node -- assumes ->next chains the nodes
+        * returned by find_type_devices(); NOTE(review): verify this is
+        * the same-type list and not the global allnodes chain */
+       for (s = 0; s < i && cpu_node->next; s++)
+               cpu_node = cpu_node->next;
+       fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL);
+       if (fp)
+               seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000);
+       return 0;
+}
+
+/*
+ * Deduce _machine (pmac vs chrp) from the device tree root node:
+ * a "chrp" device_type or an "IBM..." model string means chrp,
+ * anything else defaults to pmac.
+ */
+void __init
+intuit_machine_type(void)
+{
+       char *model;
+       struct device_node *root;
+       
+       /* ask the OF info if we're a chrp or pmac */
+       root = find_path_device("/");
+       if (root != 0) {
+               /* assume pmac unless proven to be chrp -- Cort */
+               _machine = _MACH_Pmac;
+               model = get_property(root, "device_type", NULL);
+               if (model && !strncmp("chrp", model, 4))
+                       _machine = _MACH_chrp;
+               else {
+                       model = get_property(root, "model", NULL);
+                       if (model && !strncmp(model, "IBM", 3))
+                               _machine = _MACH_chrp;
+               }
+       }
+}
+#endif
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+/*
+ * The PPC_MULTIPLATFORM version of platform_init: decide which
+ * platform we are (prep/pmac/chrp) and run its early init hook.
+ */
+void __init platform_init(void)
+{
+       /* if we didn't get any bootinfo telling us what we are... */
+       if (_machine == 0) {
+               /* prep boot loader tells us if we're prep or not
+                * (it leaves the 0xdeadc0de marker at KERNELBASE) */
+               if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
+                       _machine = _MACH_prep;
+       }
+
+#ifdef CONFIG_PPC_PREP
+       /* not much more to do here, if prep */
+       if (_machine == _MACH_prep) {
+               prep_init();
+               return;
+       }
+#endif
+
+#ifdef CONFIG_ADB
+       /* "adb_sync" on the command line forces synchronous ADB probing */
+       if (strstr(cmd_line, "adb_sync")) {
+               extern int __adb_probe_sync;
+               __adb_probe_sync = 1;
+       }
+#endif /* CONFIG_ADB */
+
+       /* no default: an unrecognized _machine falls through untouched */
+       switch (_machine) {
+#ifdef CONFIG_PPC_PMAC
+       case _MACH_Pmac:
+               pmac_init();
+               break;
+#endif
+#ifdef CONFIG_PPC_CHRP
+       case _MACH_chrp:
+               chrp_init();
+               break;
+#endif
+       }
+}
+
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+extern char *of_stdout_device;
+
+/*
+ * Make the OF stdout device the preferred console (ttyS<offset>),
+ * unless the user already passed console= on the command line.
+ */
+static int __init set_preferred_console(void)
+{
+       struct device_node *prom_stdout;
+       char *name;
+       int offset = 0;
+
+       if (of_stdout_device == NULL)
+               return -ENODEV;
+
+       /* The user has requested a console so this is already set up. */
+       if (strstr(saved_command_line, "console="))
+               return -EBUSY;
+
+       prom_stdout = find_path_device(of_stdout_device);
+       if (!prom_stdout)
+               return -ENODEV;
+
+       name = (char *)get_property(prom_stdout, "name", NULL);
+       if (!name)
+               return -ENODEV;
+
+       if (strcmp(name, "serial") == 0) {
+               int i;
+               /* i receives the property length in bytes; > 8 guarantees
+                * at least two u32 cells so reg[1] is safe to read */
+               u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
+               if (i > 8) {
+                       /* map the port address to a ttyS number -- 0x3f8 and
+                        * 0x2f8 are the standard ISA COM1/COM2 addresses;
+                        * NOTE(review): 0x898/0x890 look platform-specific,
+                        * confirm which boards use them */
+                       switch (reg[1]) {
+                               case 0x3f8:
+                                       offset = 0;
+                                       break;
+                               case 0x2f8:
+                                       offset = 1;
+                                       break;
+                               case 0x898:
+                                       offset = 2;
+                                       break;
+                               case 0x890:
+                                       offset = 3;
+                                       break;
+                               default:
+                                       /* We dont recognise the serial port */
+                                       return -ENODEV;
+                       }
+               }
+       } else if (strcmp(name, "ch-a") == 0)
+               offset = 0;
+       else if (strcmp(name, "ch-b") == 0)
+               offset = 1;
+       else
+               return -ENODEV;
+       return add_preferred_console("ttyS", offset, NULL);
+}
+console_initcall(set_preferred_console);
+#endif /* CONFIG_SERIAL_CORE_CONSOLE */
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+/*
+ * Find out what kind of machine we're on and save any data we need
+ * from the early boot process (devtree is copied on pmac by prom_init()).
+ * This is called very early on the boot process, after a minimal
+ * MMU environment has been set up but before MMU_init is called.
+ */
+void __init machine_init(unsigned long dt_ptr, unsigned long phys)
+{
+       /* dt_ptr is the physical address of the flattened device tree;
+        * NOTE(review): the phys argument is unused in this function */
+       early_init_devtree(__va(dt_ptr));
+
+#ifdef CONFIG_CMDLINE
+       /* compiled-in command line overrides whatever the devtree gave us */
+       strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
+#endif /* CONFIG_CMDLINE */
+
+       platform_init();
+
+#ifdef CONFIG_6xx
+       ppc_md.power_save = ppc6xx_idle;
+#endif
+
+       if (ppc_md.progress)
+               ppc_md.progress("id mach(): done", 0x200);
+}
+
+#ifdef CONFIG_BOOKE_WDT
+/* Checks wdt=x and wdt_period=xx command-line option */
+int __init early_parse_wdt(char *p)
+{
+       /* any value other than "0" enables the watchdog */
+       if (p && strncmp(p, "0", 1) != 0)
+               booke_wdt_enabled = 1;
+
+       return 0;
+}
+early_param("wdt", early_parse_wdt);
+
+/* wdt_period=<n>: watchdog timer period (units are platform-defined) */
+int __init early_parse_wdt_period(char *p)
+{
+       if (p)
+               booke_wdt_period = simple_strtoul(p, NULL, 0);
+
+       return 0;
+}
+early_param("wdt_period", early_parse_wdt_period);
+#endif /* CONFIG_BOOKE_WDT */
+
+/* Checks "l2cr=xxxx" command-line option */
+int __init ppc_setup_l2cr(char *str)
+{
+       /* silently ignored on CPUs without an L2CR register */
+       if (cpu_has_feature(CPU_FTR_L2CR)) {
+               unsigned long val = simple_strtoul(str, NULL, 0);
+               printk(KERN_INFO "l2cr set to %lx\n", val);
+               _set_L2CR(0);           /* force invalidate by disable cache */
+               _set_L2CR(val);         /* and enable it */
+       }
+       /* returning 1 marks the option as consumed (__setup convention) */
+       return 1;
+}
+__setup("l2cr=", ppc_setup_l2cr);
+
+#ifdef CONFIG_GENERIC_NVRAM
+
+/* Generic nvram hooks used by drivers/char/gen_nvram.c */
+unsigned char nvram_read_byte(int addr)
+{
+       if (ppc_md.nvram_read_val)
+               return ppc_md.nvram_read_val(addr);
+       /* no platform hook: report the byte as all-ones */
+       return 0xff;
+}
+EXPORT_SYMBOL(nvram_read_byte);
+
+void nvram_write_byte(unsigned char val, int addr)
+{
+       /* silently a no-op when the platform provides no NVRAM hook */
+       if (ppc_md.nvram_write_val)
+               ppc_md.nvram_write_val(addr, val);
+}
+EXPORT_SYMBOL(nvram_write_byte);
+
+/* Flush any buffered NVRAM writes via the platform hook, if present. */
+void nvram_sync(void)
+{
+       if (ppc_md.nvram_sync)
+               ppc_md.nvram_sync();
+}
+EXPORT_SYMBOL(nvram_sync);
+
+#endif /* CONFIG_GENERIC_NVRAM */
+
+static struct cpu cpu_devices[NR_CPUS];
+
+/*
+ * arch_initcall: register per-cpu sysfs devices and run the
+ * platform's late init hook.
+ */
+int __init ppc_init(void)
+{
+       int i;
+
+       /* clear the progress line */
+       if ( ppc_md.progress ) ppc_md.progress("             ", 0xffff);
+
+       /* register CPU devices */
+       for (i = 0; i < NR_CPUS; i++)
+               if (cpu_possible(i))
+                       register_cpu(&cpu_devices[i], i, NULL);
+
+       /* call platform init */
+       if (ppc_md.init != NULL) {
+               ppc_md.init();
+       }
+       return 0;
+}
+
+arch_initcall(ppc_init);
+
+/*
+ * Final 32-bit architecture setup, run from start_kernel().
+ * Warning, IO base is not yet inited.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+       extern char *klimit;
+       extern void do_init_bootmem(void);
+
+       /* so udelay does something sensible, assume <= 1000 bogomips */
+       loops_per_jiffy = 500000000 / HZ;
+
+       unflatten_device_tree();
+       finish_device_tree();
+
+#ifdef CONFIG_BOOTX_TEXT
+       init_boot_display();
+#endif
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+       /* This could be called "early setup arch", it must be done
+        * now because xmon need it
+        */
+       if (_machine == _MACH_Pmac)
+               pmac_feature_init();    /* New cool way */
+#endif
+
+#ifdef CONFIG_XMON
+       xmon_map_scc();
+       /* "xmon" on the command line drops into the debugger at boot */
+       if (strstr(cmd_line, "xmon"))
+               xmon(NULL);
+#endif /* CONFIG_XMON */
+       if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
+
+#if defined(CONFIG_KGDB)
+       if (ppc_md.kgdb_map_scc)
+               ppc_md.kgdb_map_scc();
+       set_debug_traps();
+       if (strstr(cmd_line, "gdb")) {
+               if (ppc_md.progress)
+                       ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
+               printk("kgdb breakpoint activated\n");
+               breakpoint();
+       }
+#endif
+
+       /*
+        * Set cache line size based on type of cpu as a default.
+        * Systems with OF can look in the properties on the cpu node(s)
+        * for a possibly more accurate value.
+        */
+       if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
+               dcache_bsize = cur_cpu_spec->dcache_bsize;
+               icache_bsize = cur_cpu_spec->icache_bsize;
+               ucache_bsize = 0;       /* split caches: no unified size */
+       } else
+               ucache_bsize = dcache_bsize = icache_bsize
+                       = cur_cpu_spec->dcache_bsize;
+
+       /* reboot on panic */
+       panic_timeout = 180;
+
+       init_mm.start_code = PAGE_OFFSET;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
+       init_mm.brk = (unsigned long) klimit;
+
+       /* Save unparsed command line copy for /proc/cmdline */
+       strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
+       *cmdline_p = cmd_line;
+
+       parse_early_param();
+
+       /* set up the bootmem stuff with available memory */
+       do_init_bootmem();
+       if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
+
+#ifdef CONFIG_PPC_OCP
+       /* Initialize OCP device list */
+       ocp_early_init();
+       if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+       conswitchp = &dummy_con;
+#endif
+
+       /* platform-specific setup hook */
+       ppc_md.setup_arch();
+       if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
+
+       paging_init();
+
+       /* this is for modules since _machine can be a define -- Cort */
+       ppc_md.ppc_machine = _machine;
+}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
new file mode 100644 (file)
index 0000000..0312422
--- /dev/null
@@ -0,0 +1,1307 @@
+/*
+ * 
+ * Common boot and setup code.
+ *
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/ide.h>
+#include <linux/seq_file.h>
+#include <linux/ioport.h>
+#include <linux/console.h>
+#include <linux/utsname.h>
+#include <linux/tty.h>
+#include <linux/root_dev.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/unistd.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/bootinfo.h>
+#include <asm/smp.h>
+#include <asm/elf.h>
+#include <asm/machdep.h>
+#include <asm/paca.h>
+#include <asm/ppcdebug.h>
+#include <asm/time.h>
+#include <asm/cputable.h>
+#include <asm/sections.h>
+#include <asm/btext.h>
+#include <asm/nvram.h>
+#include <asm/setup.h>
+#include <asm/system.h>
+#include <asm/rtas.h>
+#include <asm/iommu.h>
+#include <asm/serial.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/lmb.h>
+#include <asm/iSeries/ItLpNaca.h>
+#include <asm/firmware.h>
+#include <asm/systemcfg.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/*
+ * Here are some early debugging facilities. You can enable one
+ * but your kernel will not boot on anything else if you do so
+ */
+
+/* This one is for use on LPAR machines that support an HVC console
+ * on vterm 0
+ */
+extern void udbg_init_debug_lpar(void);
+/* This one is for use on Apple G5 machines
+ */
+extern void udbg_init_pmac_realmode(void);
+/* That's RTAS panel debug */
+extern void call_rtas_display_status_delay(unsigned char c);
+/* Here's maple real mode debug */
+extern void udbg_init_maple_realmode(void);
+
+#define EARLY_DEBUG_INIT() do {} while(0)
+
+#if 0
+#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
+#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
+#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
+#define EARLY_DEBUG_INIT()                                             \
+       do { udbg_putc = call_rtas_display_status_delay; } while(0)
+#endif
+
+/* extern void *stab; */
+extern unsigned long klimit;
+
+extern void mm_init_ppc64(void);
+extern void stab_initialize(unsigned long stab);
+extern void htab_initialize(void);
+extern void early_init_devtree(void *flat_dt);
+extern void unflatten_device_tree(void);
+
+extern void smp_release_cpus(void);
+
+int have_of = 1;
+int boot_cpuid = 0;
+int boot_cpuid_phys = 0;
+dev_t boot_dev;
+u64 ppc64_pft_size;
+
+struct ppc64_caches ppc64_caches;
+EXPORT_SYMBOL_GPL(ppc64_caches);
+
+/*
+ * These are used in binfmt_elf.c to put aux entries on the stack
+ * for each elf executable being started.
+ */
+int dcache_bsize;
+int icache_bsize;
+int ucache_bsize;
+
+/* The main machine-dep calls structure
+ */
+struct machdep_calls ppc_md;
+EXPORT_SYMBOL(ppc_md);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+unsigned long SYSRQ_KEY;
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+
+static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
+static struct notifier_block ppc64_panic_block = {
+       .notifier_call = ppc64_panic_event,
+       .priority = INT_MIN /* may not return; must be done last */
+};
+
+/*
+ * Perhaps we can put the pmac screen_info[] here
+ * on pmac as well so we don't need the ifdef's.
+ * Until we get multiple-console support in here
+ * that is.  -- Cort
+ * Maybe tie it to serial consoles, since this is really what
+ * these processors use on existing boards.  -- Dan
+ */ 
+struct screen_info screen_info = {
+       .orig_x = 0,
+       .orig_y = 25,
+       .orig_video_cols = 80,
+       .orig_video_lines = 25,
+       .orig_video_isVGA = 1,
+       .orig_video_points = 16
+};
+
+#ifdef CONFIG_SMP
+
+static int smt_enabled_cmdline;
+
+/* Look for ibm,smt-enabled OF option */
+static void check_smt_enabled(void)
+{
+       struct device_node *dn;
+       char *smt_option;
+
+       /* Allow the command line to overrule the OF option */
+       if (smt_enabled_cmdline)
+               return;
+
+       dn = of_find_node_by_path("/options");
+
+       if (dn) {
+               smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
+
+               if (smt_option) {
+                       if (!strcmp(smt_option, "on"))
+                               smt_enabled_at_boot = 1;
+                       else if (!strcmp(smt_option, "off"))
+                               smt_enabled_at_boot = 0;
+               }
+
+               /* of_find_node_by_path() took a reference; drop it */
+               of_node_put(dn);
+       }
+}
+
+/* Look for smt-enabled= cmdline option */
+static int __init early_smt_enabled(char *p)
+{
+       /* remember the option was given, even without a usable value,
+        * so check_smt_enabled() won't let the OF property override it */
+       smt_enabled_cmdline = 1;
+
+       if (!p)
+               return 0;
+
+       if (!strcmp(p, "on") || !strcmp(p, "1"))
+               smt_enabled_at_boot = 1;
+       else if (!strcmp(p, "off") || !strcmp(p, "0"))
+               smt_enabled_at_boot = 0;
+
+       return 0;
+}
+early_param("smt-enabled", early_smt_enabled);
+
+/**
+ * setup_cpu_maps - initialize the following cpu maps:
+ *                  cpu_possible_map
+ *                  cpu_present_map
+ *                  cpu_sibling_map
+ *
+ * Having the possible map set up early allows us to restrict allocations
+ * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
+ *
+ * We do not initialize the online map here; cpus set their own bits in
+ * cpu_online_map as they come up.
+ *
+ * This function is valid only for Open Firmware systems.  finish_device_tree
+ * must be called before using this.
+ *
+ * While we're here, we may as well set the "physical" cpu ids in the paca.
+ */
+static void __init setup_cpu_maps(void)
+{
+       struct device_node *dn = NULL;
+       int cpu = 0;
+       int swap_cpuid = 0;
+
+       check_smt_enabled();
+
+       /* Walk every "cpu" node; each may represent several hardware
+        * threads (one per entry of ibm,ppc-interrupt-server#s). */
+       while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+               u32 *intserv;
+               int j, len = sizeof(u32), nthreads;
+
+               intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
+                                             &len);
+               if (!intserv)
+                       /* Fall back to "reg"; len keeps its sizeof(u32)
+                        * initializer, so nthreads below computes to 1. */
+                       intserv = (u32 *)get_property(dn, "reg", NULL);
+
+               nthreads = len / sizeof(u32);
+
+               for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+                       cpu_set(cpu, cpu_present_map);
+                       set_hard_smp_processor_id(cpu, intserv[j]);
+
+                       /* Remember which logical id the boot cpu landed on */
+                       if (intserv[j] == boot_cpuid_phys)
+                               swap_cpuid = cpu;
+                       cpu_set(cpu, cpu_possible_map);
+                       cpu++;
+               }
+       }
+
+       /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
+        * boot cpu is logical 0.
+        */
+       if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
+               u32 tmp;
+               tmp = get_hard_smp_processor_id(0);
+               set_hard_smp_processor_id(0, boot_cpuid_phys);
+               set_hard_smp_processor_id(swap_cpuid, tmp);
+       }
+
+       /*
+        * On pSeries LPAR, we need to know how many cpus
+        * could possibly be added to this partition.
+        */
+       if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
+                               (dn = of_find_node_by_path("/rtas"))) {
+               int num_addr_cell, num_size_cell, maxcpus;
+               unsigned int *ireg;
+
+               num_addr_cell = prom_n_addr_cells(dn);
+               num_size_cell = prom_n_size_cells(dn);
+
+               ireg = (unsigned int *)
+                       get_property(dn, "ibm,lrdr-capacity", NULL);
+
+               if (!ireg)
+                       goto out;
+
+               /* The cpu count sits after the address+size cells of the
+                * property; presumably those lead with a memory quantity --
+                * confirm against the RTAS lrdr-capacity layout. */
+               maxcpus = ireg[num_addr_cell + num_size_cell];
+
+               /* Double maxcpus for processors which have SMT capability */
+               if (cpu_has_feature(CPU_FTR_SMT))
+                       maxcpus *= 2;
+
+               if (maxcpus > NR_CPUS) {
+                       printk(KERN_WARNING
+                              "Partition configured for %d cpus, "
+                              "operating system maximum is %d.\n",
+                              maxcpus, NR_CPUS);
+                       maxcpus = NR_CPUS;
+               } else
+                       printk(KERN_INFO "Partition configured for %d cpus.\n",
+                              maxcpus);
+
+               for (cpu = 0; cpu < maxcpus; cpu++)
+                       cpu_set(cpu, cpu_possible_map);
+       out:
+               of_node_put(dn);
+       }
+
+       /*
+        * Do the sibling map; assume only two threads per processor.
+        */
+       for_each_cpu(cpu) {
+               cpu_set(cpu, cpu_sibling_map[cpu]);
+               if (cpu_has_feature(CPU_FTR_SMT))
+                       cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
+       }
+
+       systemcfg->processorCount = num_present_cpus();
+}
+#endif /* CONFIG_SMP */
+
+extern struct machdep_calls pSeries_md;
+extern struct machdep_calls pmac_md;
+extern struct machdep_calls maple_md;
+extern struct machdep_calls bpa_md;
+extern struct machdep_calls iseries_md;
+
+/* Ultimately, stuff them in an elf section like initcalls... */
+/*
+ * Candidate platform descriptors; early_setup() tries each probe() in
+ * order and copies the first match into the global ppc_md.
+ * NULL-terminated.
+ */
+static struct machdep_calls __initdata *machines[] = {
+#ifdef CONFIG_PPC_PSERIES
+       &pSeries_md,
+#endif /* CONFIG_PPC_PSERIES */
+#ifdef CONFIG_PPC_PMAC
+       &pmac_md,
+#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC_MAPLE
+       &maple_md,
+#endif /* CONFIG_PPC_MAPLE */
+#ifdef CONFIG_PPC_BPA
+       &bpa_md,
+#endif
+#ifdef CONFIG_PPC_ISERIES
+       &iseries_md,
+#endif
+       NULL
+};
+
+/*
+ * Early initialization entry point. This is called by head.S
+ * with MMU translation disabled. We rely on the "feature" of
+ * the CPU that ignores the top 2 bits of the address in real
+ * mode so we can access kernel globals normally provided we
+ * only toy with things in the RMO region. From here, we do
+ * some early parsing of the device-tree to setup our LMB
+ * data structures, and allocate & initialize the hash table
+ * and segment tables so we can start running with translation
+ * enabled.
+ *
+ * It is this function which will call the probe() callback of
+ * the various platform types and copy the matching one to the
+ * global ppc_md structure. Your platform can eventually do
+ * some very early initializations from the probe() routine, but
+ * this is not recommended, be very careful as, for example, the
+ * device-tree is not accessible via normal means at this point.
+ *
+ * @dt_ptr: physical address of the flattened device-tree blob.
+ */
+
+void __init early_setup(unsigned long dt_ptr)
+{
+       struct paca_struct *lpaca = get_paca();
+       static struct machdep_calls **mach;
+
+       /*
+        * Enable early debugging if any specified (see top of
+        * this file)
+        */
+       EARLY_DEBUG_INIT();
+
+       DBG(" -> early_setup()\n");
+
+       /*
+        * Fill the default DBG level (do we want to keep
+        * that old mechanism around forever ?)
+        */
+       ppcdbg_initialize();
+
+       /*
+        * Do early initializations using the flattened device
+        * tree, like retrieving the physical memory map or
+        * calculating/retrieving the hash table size
+        */
+       early_init_devtree(__va(dt_ptr));
+
+       /*
+        * Iterate all ppc_md structures until we find the proper
+        * one for the current machine type
+        */
+       DBG("Probing machine type for platform %x...\n",
+           systemcfg->platform);
+
+       for (mach = machines; *mach; mach++) {
+               if ((*mach)->probe(systemcfg->platform))
+                       break;
+       }
+       /* What can we do if we didn't find ? */
+       if (*mach == NULL) {
+               DBG("No suitable machine found !\n");
+               for (;;);
+       }
+       /* Copy the matching descriptor into the global ppc_md */
+       ppc_md = **mach;
+
+       DBG("Found, Initializing memory management...\n");
+
+       /*
+        * Initialize stab / SLB management
+        */
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               stab_initialize(lpaca->stab_real);
+
+       /*
+        * Initialize the MMU Hash table and create the linear mapping
+        * of memory
+        */
+       htab_initialize();
+
+       DBG(" <- early_setup()\n");
+}
+
+
+/*
+ * Initialize some remaining members of the ppc64_caches and systemcfg structures
+ * (at least until we get rid of them completely). This is mostly some
+ * cache information about the CPU that will be used by cache flush
+ * routines and/or provided to userland.
+ *
+ * Only the first cpu node is consulted; all CPUs are assumed to share
+ * the same cache geometry.
+ */
+static void __init initialize_cache_info(void)
+{
+       struct device_node *np;
+       unsigned long num_cpus = 0;
+
+       DBG(" -> initialize_cache_info()\n");
+
+       for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
+               num_cpus += 1;
+
+               /* We're assuming *all* of the CPUs have the same
+                * d-cache and i-cache sizes... -Peter
+                */
+
+               if ( num_cpus == 1 ) {
+                       u32 *sizep, *lsizep;
+                       u32 size, lsize;
+                       const char *dc, *ic;
+
+                       /* PowerMac device-trees use "block-size" names */
+                       if (systemcfg->platform == PLATFORM_POWERMAC) {
+                               dc = "d-cache-block-size";
+                               ic = "i-cache-block-size";
+                       } else {
+                               dc = "d-cache-line-size";
+                               ic = "i-cache-line-size";
+                       }
+
+                       /* D-cache: default line size to the cpu-spec value
+                        * when the device-tree doesn't provide one. */
+                       size = 0;
+                       lsize = cur_cpu_spec->dcache_bsize;
+                       sizep = (u32 *)get_property(np, "d-cache-size", NULL);
+                       if (sizep != NULL)
+                               size = *sizep;
+                       lsizep = (u32 *) get_property(np, dc, NULL);
+                       if (lsizep != NULL)
+                               lsize = *lsizep;
+                       if (sizep == NULL || lsizep == NULL)
+                               DBG("Argh, can't find dcache properties ! "
+                                   "sizep: %p, lsizep: %p\n", sizep, lsizep);
+
+                       systemcfg->dcache_size = ppc64_caches.dsize = size;
+                       systemcfg->dcache_line_size =
+                               ppc64_caches.dline_size = lsize;
+                       ppc64_caches.log_dline_size = __ilog2(lsize);
+                       ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
+
+                       /* I-cache: same treatment as the d-cache above */
+                       size = 0;
+                       lsize = cur_cpu_spec->icache_bsize;
+                       sizep = (u32 *)get_property(np, "i-cache-size", NULL);
+                       if (sizep != NULL)
+                               size = *sizep;
+                       lsizep = (u32 *)get_property(np, ic, NULL);
+                       if (lsizep != NULL)
+                               lsize = *lsizep;
+                       if (sizep == NULL || lsizep == NULL)
+                               DBG("Argh, can't find icache properties ! "
+                                   "sizep: %p, lsizep: %p\n", sizep, lsizep);
+
+                       systemcfg->icache_size = ppc64_caches.isize = size;
+                       systemcfg->icache_line_size =
+                               ppc64_caches.iline_size = lsize;
+                       ppc64_caches.log_iline_size = __ilog2(lsize);
+                       ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
+               }
+       }
+
+       /* Add an eye catcher and the systemcfg layout version number */
+       strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
+       systemcfg->version.major = SYSTEMCFG_MAJOR;
+       systemcfg->version.minor = SYSTEMCFG_MINOR;
+       systemcfg->processor = mfspr(SPRN_PVR);
+
+       DBG(" <- initialize_cache_info()\n");
+}
+
+/*
+ * Look for an initrd handed over via the device-tree
+ * ("linux,initrd-start"/"linux,initrd-end" under /chosen), validate the
+ * addresses and, if sane, make the ramdisk the root device.
+ * No-op unless CONFIG_BLK_DEV_INITRD is set.
+ */
+static void __init check_for_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       u64 *prop;
+
+       DBG(" -> check_for_initrd()\n");
+
+       if (of_chosen) {
+               prop = (u64 *)get_property(of_chosen,
+                               "linux,initrd-start", NULL);
+               if (prop != NULL) {
+                       initrd_start = (unsigned long)__va(*prop);
+                       prop = (u64 *)get_property(of_chosen,
+                                       "linux,initrd-end", NULL);
+                       if (prop != NULL) {
+                               initrd_end = (unsigned long)__va(*prop);
+                               initrd_below_start_ok = 1;
+                       } else
+                               /* Start without end is useless; discard it */
+                               initrd_start = 0;
+               }
+       }
+
+       /* If we were passed an initrd, set the ROOT_DEV properly if the values
+        * look sensible. If not, clear initrd reference.
+        */
+       if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+           initrd_end > initrd_start)
+               ROOT_DEV = Root_RAM0;
+       else
+               initrd_start = initrd_end = 0;
+
+       if (initrd_start)
+               printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
+
+       DBG(" <- check_for_initrd()\n");
+#endif /* CONFIG_BLK_DEV_INITRD */
+}
+
+/*
+ * Do some initial setup of the system.  The parameters are those which 
+ * were passed in from the bootloader.
+ */
+void __init setup_system(void)
+{
+       DBG(" -> setup_system()\n");
+
+       /*
+        * Unflatten the device-tree passed by prom_init or kexec
+        */
+       unflatten_device_tree();
+
+       /*
+        * Fill the ppc64_caches & systemcfg structures with information
+        * retrieved from the device-tree. Needs to be called before
+        * finish_device_tree() since the latter requires some of the
+        * information filled up here to properly parse the interrupt
+        * tree.
+        * It also sets up the cache line sizes which allows to call
+        * routines like flush_icache_range (used by the hash init
+        * later on).
+        */
+       initialize_cache_info();
+
+#ifdef CONFIG_PPC_RTAS
+       /*
+        * Initialize RTAS if available
+        */
+       rtas_initialize();
+#endif /* CONFIG_PPC_RTAS */
+
+       /*
+        * Check if we have an initrd provided via the device-tree
+        */
+       check_for_initrd();
+
+       /*
+        * Do some platform specific early initializations, that includes
+        * setting up the hash table pointers. It also sets up some interrupt-mapping
+        * related options that will be used by finish_device_tree()
+        */
+       ppc_md.init_early();
+
+       /*
+        * "Finish" the device-tree, that is do the actual parsing of
+        * some of the properties like the interrupt map
+        */
+       finish_device_tree();
+
+#ifdef CONFIG_BOOTX_TEXT
+       init_boot_display();
+#endif
+
+       /*
+        * Initialize xmon
+        */
+#ifdef CONFIG_XMON_DEFAULT
+       xmon_init(1);
+#endif
+       /*
+        * Register early console
+        */
+       register_early_udbg_console();
+
+       /* Save unparsed command line copy for /proc/cmdline */
+       strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
+
+       /* Run the early_param() handlers (e.g. smt-enabled, mem) */
+       parse_early_param();
+
+#ifdef CONFIG_SMP
+       /*
+        * iSeries has already initialized the cpu maps at this point.
+        */
+       setup_cpu_maps();
+
+       /* Release secondary cpus out of their spinloops at 0x60 now that
+        * we can map physical -> logical CPU ids
+        */
+       smp_release_cpus();
+#endif
+
+       printk("Starting Linux PPC64 %s\n", system_utsname.version);
+
+       /* Dump the key setup results for boot-time diagnostics */
+       printk("-----------------------------------------------------\n");
+       printk("ppc64_pft_size                = 0x%lx\n", ppc64_pft_size);
+       printk("ppc64_debug_switch            = 0x%lx\n", ppc64_debug_switch);
+       printk("ppc64_interrupt_controller    = 0x%ld\n", ppc64_interrupt_controller);
+       printk("systemcfg                     = 0x%p\n", systemcfg);
+       printk("systemcfg->platform           = 0x%x\n", systemcfg->platform);
+       printk("systemcfg->processorCount     = 0x%lx\n", systemcfg->processorCount);
+       printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
+       printk("ppc64_caches.dcache_line_size = 0x%x\n",
+                       ppc64_caches.dline_size);
+       printk("ppc64_caches.icache_line_size = 0x%x\n",
+                       ppc64_caches.iline_size);
+       printk("htab_address                  = 0x%p\n", htab_address);
+       printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
+       printk("-----------------------------------------------------\n");
+
+       mm_init_ppc64();
+
+       DBG(" <- setup_system()\n");
+}
+
+/* Common shutdown work: flush NVRAM if the platform supports it.
+ * Also used by kexec. */
+void machine_shutdown(void)
+{
+       void (*nvram_sync)(void) = ppc_md.nvram_sync;
+
+       if (nvram_sync != NULL)
+               nvram_sync();
+}
+
+/*
+ * Reboot the machine: sync NVRAM, then call the platform restart hook.
+ * NOTE(review): the code after ppc_md.restart() looks like a
+ * should-not-return fallback; the "System Halted" message is shared
+ * verbatim with halt/power-off -- confirm a restart-specific message
+ * wasn't intended.
+ */
+void machine_restart(char *cmd)
+{
+       machine_shutdown();
+       ppc_md.restart(cmd);
+#ifdef CONFIG_SMP
+       smp_send_stop();
+#endif
+       printk(KERN_EMERG "System Halted, OK to turn off power\n");
+       local_irq_disable();
+       while (1) ;
+}
+
+/*
+ * Power the machine off: sync NVRAM, then call the platform hook.
+ * If the hook returns, stop other cpus, announce, and spin with
+ * interrupts off.
+ */
+void machine_power_off(void)
+{
+       machine_shutdown();
+       ppc_md.power_off();
+#ifdef CONFIG_SMP
+       smp_send_stop();
+#endif
+       printk(KERN_EMERG "System Halted, OK to turn off power\n");
+       local_irq_disable();
+       while (1) ;
+}
+/* Used by the G5 thermal driver */
+EXPORT_SYMBOL_GPL(machine_power_off);
+
+/*
+ * Halt the machine: sync NVRAM, then call the platform halt hook.
+ * If the hook returns, stop other cpus, announce, and spin with
+ * interrupts off.
+ */
+void machine_halt(void)
+{
+       machine_shutdown();
+       ppc_md.halt();
+#ifdef CONFIG_SMP
+       smp_send_stop();
+#endif
+       printk(KERN_EMERG "System Halted, OK to turn off power\n");
+       local_irq_disable();
+       while (1) ;
+}
+
+/*
+ * Panic notifier: forward the panic message to the platform's panic
+ * hook (registered in setup_arch() only when ppc_md.panic is set).
+ */
+static int ppc64_panic_event(struct notifier_block *this,
+                             unsigned long event, void *ptr)
+{
+       ppc_md.panic((char *)ptr);  /* May not return */
+       return NOTIFY_DONE;
+}
+
+
+#ifdef CONFIG_SMP
+/* Per-cpu copy of the PVR (processor version register), read elsewhere */
+DEFINE_PER_CPU(unsigned int, pvr);
+#endif
+
+/*
+ * /proc/cpuinfo "show" callback. Position NR_CPUS+1 (cpu_id == NR_CPUS)
+ * is the trailing summary entry: timebase plus any platform-specific
+ * lines; every other position describes one online cpu.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       unsigned long cpu_id = (unsigned long)v - 1;
+       unsigned int pvr;
+       unsigned short maj;
+       unsigned short min;
+
+       if (cpu_id == NR_CPUS) {
+               seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
+
+               if (ppc_md.show_cpuinfo != NULL)
+                       ppc_md.show_cpuinfo(m);
+
+               return 0;
+       }
+
+       /* We only show online cpus: disable preempt (overzealous, I
+        * know) to prevent cpu going down. */
+       preempt_disable();
+       if (!cpu_online(cpu_id)) {
+               preempt_enable();
+               return 0;
+       }
+
+#ifdef CONFIG_SMP
+       pvr = per_cpu(pvr, cpu_id);
+#else
+       pvr = mfspr(SPRN_PVR);
+#endif
+       /* Low halfword of the PVR encodes major/minor revision */
+       maj = (pvr >> 8) & 0xFF;
+       min = pvr & 0xFF;
+
+       seq_printf(m, "processor\t: %lu\n", cpu_id);
+       seq_printf(m, "cpu\t\t: ");
+
+       if (cur_cpu_spec->pvr_mask)
+               seq_printf(m, "%s", cur_cpu_spec->cpu_name)
+       else
+               seq_printf(m, "unknown (%08x)", pvr);
+
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               seq_printf(m, ", altivec supported");
+#endif /* CONFIG_ALTIVEC */
+
+       seq_printf(m, "\n");
+
+       /*
+        * Assume here that all clock rates are the same in a
+        * smp system.  -- Cort
+        */
+       seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
+                  ppc_proc_freq % 1000000);
+
+       seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);
+
+       preempt_enable();
+       return 0;
+}
+
+/* seq_file iterator: positions 1..NR_CPUS map to cpus, NR_CPUS+1 is the
+ * trailing summary entry handled by show_cpuinfo(). */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       if (*pos > NR_CPUS)
+               return NULL;
+       return (void *)(*pos + 1);
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+       .start = c_start,
+       .next  = c_next,
+       .stop  = c_stop,
+       .show  = show_cpuinfo,
+};
+
+/*
+ * These three variables are used to save values passed to us by prom_init()
+ * via the device tree. The TCE variables are needed because with a memory_limit
+ * in force we may need to explicitly map the TCE area at the top of RAM.
+ */
+unsigned long memory_limit;            /* 0 means no limit; set by mem= */
+unsigned long tce_alloc_start;         /* from prom_init, per comment above */
+unsigned long tce_alloc_end;
+
+#ifdef CONFIG_PPC_ISERIES
+/*
+ * On iSeries we just parse the mem=X option from the command line.
+ * On pSeries it's a bit more complicated, see prom_init_mem()
+ */
+static int __init early_parsemem(char *value)
+{
+       if (value == NULL)
+               return 0;
+
+       /* Round the requested limit up to a whole page */
+       memory_limit = ALIGN(memparse(value, &value), PAGE_SIZE);
+
+       return 0;
+}
+early_param("mem", early_parsemem);
+#endif /* CONFIG_PPC_ISERIES */
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+/*
+ * Derive the preferred console from the firmware's stdout device
+ * (/chosen "linux,stdout-path"), unless the user already passed
+ * console= on the command line. Supports ISA 8250 serial ports,
+ * pSeries hvc/hvsi virtual consoles and PowerMac zilog channels,
+ * depending on the kernel configuration.
+ *
+ * Returns 0 on success, a negative errno when no console can be
+ * determined.
+ */
+static int __init set_preferred_console(void)
+{
+       struct device_node *prom_stdout = NULL;
+       char *name;
+       u32 *spd;
+       int offset = 0;
+
+       DBG(" -> set_preferred_console()\n");
+
+       /* The user has requested a console so this is already set up. */
+       if (strstr(saved_command_line, "console=")) {
+               DBG(" console was specified !\n");
+               return -EBUSY;
+       }
+
+       if (!of_chosen) {
+               DBG(" of_chosen is NULL !\n");
+               return -ENODEV;
+       }
+       /* We are getting a weird phandle from OF ... */
+       /* ... So use the full path instead */
+       name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
+       if (name == NULL) {
+               DBG(" no linux,stdout-path !\n");
+               return -ENODEV;
+       }
+       prom_stdout = of_find_node_by_path(name);
+       if (!prom_stdout) {
+               DBG(" can't find stdout package %s !\n", name);
+               return -ENODEV;
+       }       
+       DBG("stdout is %s\n", prom_stdout->full_name);
+
+       name = (char *)get_property(prom_stdout, "name", NULL);
+       if (!name) {
+               DBG(" stdout package has no name !\n");
+               goto not_found;
+       }
+       /* Optional baud rate; passed as the ttyS console option below */
+       spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
+
+       /* if (0) lets every branch below be a uniform "else if" under
+        * any combination of the config options */
+       if (0)
+               ;
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+       else if (strcmp(name, "serial") == 0) {
+               int i;
+               u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
+               if (i > 8) {
+                       /* Map the legacy ISA I/O port to a ttyS index */
+                       switch (reg[1]) {
+                               case 0x3f8:
+                                       offset = 0;
+                                       break;
+                               case 0x2f8:
+                                       offset = 1;
+                                       break;
+                               case 0x898:
+                                       offset = 2;
+                                       break;
+                               case 0x890:
+                                       offset = 3;
+                                       break;
+                               default:
+                                       /* We dont recognise the serial port */
+                                       goto not_found;
+                       }
+               }
+       }
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
+#ifdef CONFIG_PPC_PSERIES
+       else if (strcmp(name, "vty") == 0) {
+               u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
+               char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
+
+               if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
+                       /* Host Virtual Serial Interface */
+                       /* NOTE: this offset deliberately shadows the outer one;
+                        * it indexes hvsi devices, not ttyS ports */
+                       int offset;
+                       switch (reg[0]) {
+                               case 0x30000000:
+                                       offset = 0;
+                                       break;
+                               case 0x30000001:
+                                       offset = 1;
+                                       break;
+                               default:
+                                       goto not_found;
+                       }
+                       of_node_put(prom_stdout);
+                       DBG("Found hvsi console at offset %d\n", offset);
+                       return add_preferred_console("hvsi", offset, NULL);
+               } else {
+                       /* pSeries LPAR virtual console */
+                       of_node_put(prom_stdout);
+                       DBG("Found hvc console\n");
+                       return add_preferred_console("hvc", 0, NULL);
+               }
+       }
+#endif /* CONFIG_PPC_PSERIES */
+#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
+       else if (strcmp(name, "ch-a") == 0)
+               offset = 0;
+       else if (strcmp(name, "ch-b") == 0)
+               offset = 1;
+#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+       else
+               goto not_found;
+       of_node_put(prom_stdout);
+
+       DBG("Found serial console at ttyS%d\n", offset);
+
+       if (spd) {
+               static char __initdata opt[16];
+               sprintf(opt, "%d", *spd);
+               return add_preferred_console("ttyS", offset, opt);
+       } else
+               return add_preferred_console("ttyS", offset, NULL);
+
+ not_found:
+       DBG("No preferred console found !\n");
+       of_node_put(prom_stdout);
+       return -ENODEV;
+}
+console_initcall(set_preferred_console);
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_IRQSTACKS
+/*
+ * Allocate the per-cpu soft/hard irq stacks out of the LMB allocator.
+ * Called before the page allocator is up.
+ */
+static void __init irqstack_early_init(void)
+{
+       unsigned int i;
+
+       /*
+        * interrupt stacks must be under 256MB, we cannot afford to take
+        * SLB misses on them.
+        */
+       for_each_cpu(i) {
+               softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
+                                       THREAD_SIZE, 0x10000000));
+               hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
+                                       THREAD_SIZE, 0x10000000));
+       }
+}
+#else
+#define irqstack_early_init()
+#endif
+
+/*
+ * Stack space used when we detect a bad kernel stack pointer, and
+ * early in SMP boots before relocation is enabled.
+ */
+static void __init emergency_stack_init(void)
+{
+       unsigned long limit;
+       unsigned int i;
+
+       /*
+        * Emergency stacks must be under 256MB, we cannot afford to take
+        * SLB misses on them. The ABI also requires them to be 128-byte
+        * aligned.
+        *
+        * Since we use these as temporary stacks during secondary CPU
+        * bringup, we need to get at them in real mode. This means they
+        * must also be within the RMO region.
+        */
+       limit = min(0x10000000UL, lmb.rmo_size);
+
+       /* Stacks grow down: point emergency_sp at the top of each page */
+       for_each_cpu(i)
+               paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
+                                               limit)) + PAGE_SIZE;
+}
+
+/*
+ * Called from setup_arch to initialize the bitmap of available
+ * syscalls in the systemcfg page.
+ *
+ * sys_call_table interleaves two entries per syscall number: index
+ * i*2 feeds syscall_map_64 and i*2+1 feeds syscall_map_32, as used
+ * below. A bit is set iff the slot is not sys_ni_syscall.
+ */
+void __init setup_syscall_map(void)
+{
+       unsigned int i, count64 = 0, count32 = 0;
+       extern unsigned long *sys_call_table;
+       extern unsigned long sys_ni_syscall;
+
+
+       for (i = 0; i < __NR_syscalls; i++) {
+               if (sys_call_table[i*2] != sys_ni_syscall) {
+                       count64++;
+                       systemcfg->syscall_map_64[i >> 5] |=
+                               0x80000000UL >> (i & 0x1f);
+               }
+               if (sys_call_table[i*2+1] != sys_ni_syscall) {
+                       count32++;
+                       systemcfg->syscall_map_32[i >> 5] |=
+                               0x80000000UL >> (i & 0x1f);
+               }
+       }
+       printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
+              count32, count64);
+}
+
+/*
+ * Called into from start_kernel, after lock_kernel has been called.
+ * Initializes bootmem, which is used to manage page allocation until
+ * mem_init is called.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+       extern void do_init_bootmem(void);
+
+       ppc64_boot_msg(0x12, "Setup Arch");
+
+       *cmdline_p = cmd_line;
+
+       /*
+        * Set cache line size based on type of cpu as a default.
+        * Systems with OF can look in the properties on the cpu node(s)
+        * for a possibly more accurate value.
+        */
+       dcache_bsize = ppc64_caches.dline_size;
+       icache_bsize = ppc64_caches.iline_size;
+
+       /* reboot on panic */
+       panic_timeout = 180;
+
+       /* Hook the platform panic handler into the panic notifier chain */
+       if (ppc_md.panic)
+               notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
+
+       init_mm.start_code = PAGE_OFFSET;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
+       init_mm.brk = klimit;
+       
+       irqstack_early_init();
+       emergency_stack_init();
+
+       stabs_alloc();
+
+       /* set up the bootmem stuff with available memory */
+       do_init_bootmem();
+       sparse_init();
+
+       /* initialize the syscall map in systemcfg */
+       setup_syscall_map();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+       conswitchp = &dummy_con;
+#endif
+
+       ppc_md.setup_arch();
+
+       /* Use the default idle loop if the platform hasn't provided one. */
+       if (NULL == ppc_md.idle_loop) {
+               ppc_md.idle_loop = default_idle;
+               printk(KERN_INFO "Using default idle loop\n");
+       }
+
+       paging_init();
+       ppc64_boot_msg(0x15, "Setup Done");
+}
+
+
+/* ToDo: do something useful if ppc_md is not yet setup. */
+#define PPC64_LINUX_FUNCTION 0x0f000000
+#define PPC64_IPL_MESSAGE 0xc0000000
+#define PPC64_TERM_MESSAGE 0xb0000000
+
+/*
+ * Push a two-part message to the platform progress display, if any:
+ * first the hex source code, then the text itself (truncated to the
+ * local buffer size).
+ */
+static void ppc64_do_msg(unsigned int src, const char *msg)
+{
+       if (ppc_md.progress) {
+               char buf[128];
+
+               sprintf(buf, "%08X\n", src);
+               ppc_md.progress(buf, 0);
+               /* sizeof(buf) instead of a magic 128 so the bound always
+                * tracks the buffer declaration above */
+               snprintf(buf, sizeof(buf), "%s", msg);
+               ppc_md.progress(buf, 0);
+       }
+}
+
+/* Print a boot progress message: to the platform display via
+ * ppc64_do_msg() and to the kernel log. */
+void ppc64_boot_msg(unsigned int src, const char *msg)
+{
+       ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
+       printk("[boot]%04x %s\n", src, msg);
+}
+
+/* Print a termination message (print only -- does not stop the kernel) */
+void ppc64_terminate_msg(unsigned int src, const char *msg)
+{
+       ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
+       printk("[terminate]%04x %s\n", src, msg);
+}
+
+#ifndef CONFIG_PPC_ISERIES
+/*
+ * This function can be used by platforms to "find" legacy serial ports.
+ * It works for "serial" nodes under an "isa" node, and will try to
+ * respect the "ibm,aix-loc" property if any. It works with up to 8
+ * ports.
+ */
+
+#define MAX_LEGACY_SERIAL_PORTS        8
+/* Discovered ports; +1 slot presumably acts as a zero terminator for
+ * the 8250 platform-device consumer -- confirm against that driver. */
+static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
+/* Number of slots currently claimed in serial_ports */
+static unsigned int old_serial_count;
+
+void __init generic_find_legacy_serial_ports(u64 *physport,
+               unsigned int *default_speed)
+{
+       struct device_node *np;
+       u32 *sizeprop;
+
+       struct isa_reg_property {
+               u32 space;
+               u32 address;
+               u32 size;
+       };
+       struct pci_reg_property {
+               struct pci_address addr;
+               u32 size_hi;
+               u32 size_lo;
+       };                                                                        
+
+       DBG(" -> generic_find_legacy_serial_port()\n");
+
+       *physport = 0;
+       if (default_speed)
+               *default_speed = 0;
+
+       np = of_find_node_by_path("/");
+       if (!np)
+               return;
+
+       /* First fill our array */
+       for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
+               struct device_node *isa, *pci;
+               struct isa_reg_property *reg;
+               unsigned long phys_size, addr_size, io_base;
+               u32 *rangesp;
+               u32 *interrupts, *clk, *spd;
+               char *typep;
+               int index, rlen, rentsize;
+
+               /* Ok, first check if it's under an "isa" parent */
+               isa = of_get_parent(np);
+               if (!isa || strcmp(isa->name, "isa")) {
+                       DBG("%s: no isa parent found\n", np->full_name);
+                       continue;
+               }
+               
+               /* Now look for an "ibm,aix-loc" property that gives us ordering
+                * if any...
+                */
+               typep = (char *)get_property(np, "ibm,aix-loc", NULL);
+
+               /* Get the ISA port number */
+               reg = (struct isa_reg_property *)get_property(np, "reg", NULL); 
+               if (reg == NULL)
+                       goto next_port;
+               /* We assume the interrupt number isn't translated ... */
+               interrupts = (u32 *)get_property(np, "interrupts", NULL);
+               /* get clock freq. if present */
+               clk = (u32 *)get_property(np, "clock-frequency", NULL);
+               /* get default speed if present */
+               spd = (u32 *)get_property(np, "current-speed", NULL);
+               /* Default to locate at end of array */
+               index = old_serial_count; /* end of the array by default */
+
+               /* If we have a location index, then use it */
+               if (typep && *typep == 'S') {
+                       index = simple_strtol(typep+1, NULL, 0) - 1;
+                       /* if index is out of range, use end of array instead */
+                       if (index >= MAX_LEGACY_SERIAL_PORTS)
+                               index = old_serial_count;
+                       /* if our index is still out of range, that means the
+                        * array is full; we could scan for a free slot, but it
+                        * makes little sense to bother, so just skip the port
+                        */
+                       if (index >= MAX_LEGACY_SERIAL_PORTS)
+                               goto next_port;
+                       if (index >= old_serial_count)
+                               old_serial_count = index + 1;
+                       /* Check if there is a port who already claimed our slot */
+                       if (serial_ports[index].iobase != 0) {
+                               /* if we still have some room, move it, else override */
+                               if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
+                                       DBG("Moved legacy port %d -> %d\n", index,
+                                           old_serial_count);
+                                       serial_ports[old_serial_count++] =
+                                               serial_ports[index];
+                               } else {
+                                       DBG("Replacing legacy port %d\n", index);
+                               }
+                       }
+               }
+               if (index >= MAX_LEGACY_SERIAL_PORTS)
+                       goto next_port;
+               if (index >= old_serial_count)
+                       old_serial_count = index + 1;
+
+               /* Now fill the entry */
+               memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
+               serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
+               serial_ports[index].iobase = reg->address;
+               serial_ports[index].irq = interrupts ? interrupts[0] : 0;
+               serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
+
+               DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
+                   index,
+                   serial_ports[index].iobase,
+                   serial_ports[index].irq,
+                   serial_ports[index].uartclk);
+
+               /* Get phys address of IO reg for port 1 */
+               if (index != 0)
+                       goto next_port;
+
+               pci = of_get_parent(isa);
+               if (!pci) {
+                       DBG("%s: no pci parent found\n", np->full_name);
+                       goto next_port;
+               }
+
+               rangesp = (u32 *)get_property(pci, "ranges", &rlen);
+               if (rangesp == NULL) {
+                       of_node_put(pci);
+                       goto next_port;
+               }
+               rlen /= 4;
+
+               /* we need the #size-cells of the PCI bridge node itself */
+               phys_size = 1;
+               sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
+               if (sizeprop != NULL)
+                       phys_size = *sizeprop;
+               /* we need the parent #addr-cells */
+               addr_size = prom_n_addr_cells(pci);
+               rentsize = 3 + addr_size + phys_size;
+               io_base = 0;
+               for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
+                       if (((rangesp[0] >> 24) & 0x3) != 1)
+                               continue; /* not IO space */
+                       io_base = rangesp[3];
+                       if (addr_size == 2)
+                               io_base = (io_base << 32) | rangesp[4];
+               }
+               if (io_base != 0) {
+                       *physport = io_base + reg->address;
+                       if (default_speed && spd)
+                               *default_speed = *spd;
+               }
+               of_node_put(pci);
+       next_port:
+               of_node_put(isa);
+       }
+
+       DBG(" <- generic_find_legacy_serial_port()\n");
+}
+
+static struct platform_device serial_device = {
+       .name   = "serial8250",
+       .id     = PLAT8250_DEV_PLATFORM,
+       .dev    = {
+               .platform_data = serial_ports,
+       },
+};
+
+static int __init serial_dev_init(void)
+{
+       return platform_device_register(&serial_device);
+}
+arch_initcall(serial_dev_init);
+
+#endif /* CONFIG_PPC_ISERIES */
+
+int check_legacy_ioport(unsigned long base_port)
+{
+       if (ppc_md.check_legacy_ioport == NULL)
+               return 0;
+       return ppc_md.check_legacy_ioport(base_port);
+}
+EXPORT_SYMBOL(check_legacy_ioport);
+
+#ifdef CONFIG_XMON
+static int __init early_xmon(char *p)
+{
+       /* ensure xmon is enabled */
+       if (p) {
+               if (strncmp(p, "on", 2) == 0)
+                       xmon_init(1);
+               if (strncmp(p, "off", 3) == 0)
+                       xmon_init(0);
+               if (strncmp(p, "early", 5) != 0)
+                       return 0;
+       }
+       xmon_init(1);
+       debugger(NULL);
+
+       return 0;
+}
+early_param("xmon", early_xmon);
+#endif
+
+void cpu_die(void)
+{
+       if (ppc_md.cpu_die)
+               ppc_md.cpu_die();
+}
similarity index 54%
rename from arch/ppc64/kernel/signal32.c
rename to arch/powerpc/kernel/signal_32.c
index a8b7a5a..92452b2 100644 (file)
 /*
- * signal32.c: Support 32bit signal syscalls.
+ * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
  *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  * Copyright (C) 2001 IBM
  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  *
- * These routines maintain argument size conversion between 32bit and 64bit
- * environment.
+ *  Derived from "arch/i386/kernel/signal.c"
+ *    Copyright (C) 1991, 1992 Linus Torvalds
+ *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
  *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
  */
 
 #include <linux/config.h>
 #include <linux/sched.h>
-#include <linux/mm.h> 
+#include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
-#include <linux/syscalls.h>
 #include <linux/errno.h>
 #include <linux/elf.h>
+#ifdef CONFIG_PPC64
+#include <linux/syscalls.h>
 #include <linux/compat.h>
 #include <linux/ptrace.h>
-#include <asm/ppc32.h>
+#else
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+#include <linux/suspend.h>
+#endif
+
 #include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#ifdef CONFIG_PPC64
+#include <asm/ppc32.h>
 #include <asm/ppcdebug.h>
 #include <asm/unistd.h>
-#include <asm/cacheflush.h>
 #include <asm/vdso.h>
+#else
+#include <asm/ucontext.h>
+#include <asm/pgtable.h>
+#endif
 
-#define DEBUG_SIG 0
+#undef DEBUG_SIG
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-#define GP_REGS_SIZE32 min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
+#ifdef CONFIG_PPC64
+#define do_signal      do_signal32
+#define sys_sigsuspend compat_sys_sigsuspend
+#define sys_rt_sigsuspend      compat_sys_rt_sigsuspend
+#define sys_rt_sigreturn       compat_sys_rt_sigreturn
+#define sys_sigaction  compat_sys_sigaction
+#define sys_swapcontext        compat_sys_swapcontext
+#define sys_sigreturn  compat_sys_sigreturn
+
+#define old_sigaction  old_sigaction32
+#define sigcontext     sigcontext32
+#define mcontext       mcontext32
+#define ucontext       ucontext32
+
+/*
+ * Returning 0 means we return to userspace via
+ * ret_from_except and thus restore all user
+ * registers from *regs.  This is what we need
+ * to do when a signal has been delivered.
+ */
+#define sigreturn_exit(regs)   return 0
+
+#define GP_REGS_SIZE   min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
+#undef __SIGNAL_FRAMESIZE
+#define __SIGNAL_FRAMESIZE     __SIGNAL_FRAMESIZE32
+#undef ELF_NVRREG
+#define ELF_NVRREG     ELF_NVRREG32
+
+/*
+ * Functions for flipping sigsets (thanks to brain dead generic
+ * implementation that makes things simple for little endian only)
+ */
+static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
+{
+       compat_sigset_t cset;
+
+       switch (_NSIG_WORDS) {
+       case 4: cset.sig[6] = set->sig[3] & 0xffffffffull; /* was sig[5]: clobbered by case 3 fallthrough */
+               cset.sig[7] = set->sig[3] >> 32;
+       case 3: cset.sig[4] = set->sig[2] & 0xffffffffull; /* fall through */
+               cset.sig[5] = set->sig[2] >> 32;
+       case 2: cset.sig[2] = set->sig[1] & 0xffffffffull; /* fall through */
+               cset.sig[3] = set->sig[1] >> 32;
+       case 1: cset.sig[0] = set->sig[0] & 0xffffffffull; /* fall through */
+               cset.sig[1] = set->sig[0] >> 32;
+       }
+       return copy_to_user(uset, &cset, sizeof(*uset));
+}
+
+static inline int get_sigset_t(sigset_t *set,
+                              const compat_sigset_t __user *uset)
+{
+       compat_sigset_t s32;
+
+       if (copy_from_user(&s32, uset, sizeof(*uset)))
+               return -EFAULT;
+
+       /*
+        * Swap the 2 words of the 64-bit sigset_t (they are stored
+        * in the "wrong" endian in 32-bit user storage).
+        */
+       switch (_NSIG_WORDS) {
+       case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+       case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+       case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+       case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+       }
+       return 0;
+}
+
+static inline int get_old_sigaction(struct k_sigaction *new_ka,
+               struct old_sigaction __user *act)
+{
+       compat_old_sigset_t mask;
+       compat_uptr_t handler, restorer;
+
+       if (get_user(handler, &act->sa_handler) ||
+           __get_user(restorer, &act->sa_restorer) ||
+           __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
+           __get_user(mask, &act->sa_mask))
+               return -EFAULT;
+       new_ka->sa.sa_handler = compat_ptr(handler);
+       new_ka->sa.sa_restorer = compat_ptr(restorer);
+       siginitset(&new_ka->sa.sa_mask, mask);
+       return 0;
+}
+
+static inline compat_uptr_t to_user_ptr(void *kp)
+{
+       return (compat_uptr_t)(u64)kp;
+}
+
+#define from_user_ptr(p)       compat_ptr(p)
+
+static inline int save_general_regs(struct pt_regs *regs,
+               struct mcontext __user *frame)
+{
+       elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
+       int i;
+
+       for (i = 0; i <= PT_RESULT; i ++)
+               if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
+                       return -EFAULT;
+       return 0;
+}
+
+static inline int restore_general_regs(struct pt_regs *regs,
+               struct mcontext __user *sr)
+{
+       elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
+       int i;
+
+       for (i = 0; i <= PT_RESULT; i++) {
+               if ((i == PT_MSR) || (i == PT_SOFTE))
+                       continue;
+               if (__get_user(gregs[i], &sr->mc_gregs[i]))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+#else /* CONFIG_PPC64 */
+
+extern void sigreturn_exit(struct pt_regs *);
+
+#define GP_REGS_SIZE   min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
+
+static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
+{
+       return copy_to_user(uset, set, sizeof(*uset));
+}
+
+static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
+{
+       return copy_from_user(set, uset, sizeof(*uset));
+}
+
+static inline int get_old_sigaction(struct k_sigaction *new_ka,
+               struct old_sigaction __user *act)
+{
+       old_sigset_t mask;
+
+       if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+                       __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
+                       __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
+               return -EFAULT;
+       __get_user(new_ka->sa.sa_flags, &act->sa_flags);
+       __get_user(mask, &act->sa_mask);
+       siginitset(&new_ka->sa.sa_mask, mask);
+       return 0;
+}
+
+#define to_user_ptr(p)         (p)
+#define from_user_ptr(p)       (p)
+
+static inline int save_general_regs(struct pt_regs *regs,
+               struct mcontext __user *frame)
+{
+       return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
+}
+
+static inline int restore_general_regs(struct pt_regs *regs,
+               struct mcontext __user *sr)
+{
+       /* copy up to but not including MSR */
+       if (__copy_from_user(regs, &sr->mc_gregs,
+                               PT_MSR * sizeof(elf_greg_t)))
+               return -EFAULT;
+       /* copy from orig_r3 (the word after the MSR) up to the end */
+       if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
+                               GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
+               return -EFAULT;
+       return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+int do_signal(sigset_t *oldset, struct pt_regs *regs);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
+              struct pt_regs *regs)
+{
+       sigset_t saveset;
+
+       mask &= _BLOCKABLE;
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       siginitset(&current->blocked, mask);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->result = -EINTR;
+       regs->gpr[3] = EINTR;
+       regs->ccr |= 0x10000000;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(&saveset, regs))
+                       sigreturn_exit(regs);
+       }
+}
+
+long sys_rt_sigsuspend(
+#ifdef CONFIG_PPC64
+               compat_sigset_t __user *unewset,
+#else
+               sigset_t __user *unewset,
+#endif
+               size_t sigsetsize, int p3, int p4,
+               int p6, int p7, struct pt_regs *regs)
+{
+       sigset_t saveset, newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (get_sigset_t(&newset, unewset))
+               return -EFAULT;
+       sigdelsetmask(&newset, ~_BLOCKABLE);
+
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->result = -EINTR;
+       regs->gpr[3] = EINTR;
+       regs->ccr |= 0x10000000;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(&saveset, regs))
+                       sigreturn_exit(regs);
+       }
+}
+
+#ifdef CONFIG_PPC32
+long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
+               int r6, int r7, int r8, struct pt_regs *regs)
+{
+       return do_sigaltstack(uss, uoss, regs->gpr[1]);
+}
+#endif
+
+long sys_sigaction(int sig, struct old_sigaction __user *act,
+               struct old_sigaction __user *oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+#ifdef CONFIG_PPC64
+       if (sig < 0)
+               sig = -sig;
+#endif
+
+       if (act) {
+               if (get_old_sigaction(&new_ka, act))
+                       return -EFAULT;
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+       if (!ret && oact) {
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(to_user_ptr(old_ka.sa.sa_handler),
+                           &oact->sa_handler) ||
+                   __put_user(to_user_ptr(old_ka.sa.sa_restorer),
+                           &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+                       return -EFAULT;
+       }
+
+       return ret;
+}
 
 /*
  * When we have signals to deliver, we set up on the
  * user stack, going down from the original stack pointer:
- *     a sigregs32 struct
- *     a sigcontext32 struct
- *     a gap of __SIGNAL_FRAMESIZE32 bytes
+ *     a sigregs struct
+ *     a sigcontext struct
+ *     a gap of __SIGNAL_FRAMESIZE bytes
  *
  * Each of these things must be a multiple of 16 bytes in size.
  *
  */
-struct sigregs32 {
-       struct mcontext32       mctx;           /* all the register values */
+struct sigregs {
+       struct mcontext mctx;           /* all the register values */
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
@@ -64,17 +361,21 @@ struct sigregs32 {
 /*
  *  When we have rt signals to deliver, we set up on the
  *  user stack, going down from the original stack pointer:
- *     one rt_sigframe32 struct (siginfo + ucontext + ABI gap)
- *     a gap of __SIGNAL_FRAMESIZE32+16 bytes
- *  (the +16 is to get the siginfo and ucontext32 in the same
+ *     one rt_sigframe struct (siginfo + ucontext + ABI gap)
+ *     a gap of __SIGNAL_FRAMESIZE+16 bytes
+ *  (the +16 is to get the siginfo and ucontext in the same
  *  positions as in older kernels).
  *
  *  Each of these things must be a multiple of 16 bytes in size.
  *
  */
-struct rt_sigframe32 {
-       compat_siginfo_t        info;
-       struct ucontext32       uc;
+struct rt_sigframe {
+#ifdef CONFIG_PPC64
+       compat_siginfo_t info;
+#else
+       struct siginfo info;
+#endif
+       struct ucontext uc;
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
@@ -82,66 +383,24 @@ struct rt_sigframe32 {
        int                     abigap[56];
 };
 
-
-/*
- * Common utility functions used by signal and context support
- *
- */
-
-/*
- * Restore the user process's signal mask
- * (implemented in signal.c)
- */
-extern void restore_sigmask(sigset_t *set);
-
-/*
- * Functions for flipping sigsets (thanks to brain dead generic
- * implementation that makes things simple for little endian only
- */
-static inline void compat_from_sigset(compat_sigset_t *compat, sigset_t *set)
-{
-       switch (_NSIG_WORDS) {
-       case 4: compat->sig[5] = set->sig[3] & 0xffffffffull ;
-               compat->sig[7] = set->sig[3] >> 32; 
-       case 3: compat->sig[4] = set->sig[2] & 0xffffffffull ;
-               compat->sig[5] = set->sig[2] >> 32; 
-       case 2: compat->sig[2] = set->sig[1] & 0xffffffffull ;
-               compat->sig[3] = set->sig[1] >> 32; 
-       case 1: compat->sig[0] = set->sig[0] & 0xffffffffull ;
-               compat->sig[1] = set->sig[0] >> 32; 
-       }
-}
-
-static inline void sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
-{
-       switch (_NSIG_WORDS) {
-       case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
-       case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
-       case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
-       case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
-       }
-}
-
-
 /*
  * Save the current user registers on the user stack.
- * We only save the altivec registers if the process has used
- * altivec instructions at some point.
+ * We only save the altivec/spe registers if the process has used
+ * altivec/spe instructions at some point.
  */
-static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame, int sigret)
+static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+               int sigret)
 {
-       elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
-       int i, err = 0;
-
+#ifdef CONFIG_PPC32
+       CHECK_FULL_REGS(regs);
+#endif
        /* Make sure floating point registers are stored in regs */
        flush_fp_to_thread(current);
 
        /* save general and floating-point registers */
-       for (i = 0; i <= PT_RESULT; i ++)
-               err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]);
-       err |= __copy_to_user(&frame->mc_fregs, current->thread.fpr,
-                             ELF_NFPREG * sizeof(double));
-       if (err)
+       if (save_general_regs(regs, frame) ||
+           __copy_to_user(&frame->mc_fregs, current->thread.fpr,
+                   ELF_NFPREG * sizeof(double)))
                return 1;
 
        current->thread.fpscr = 0;      /* turn off all fp exceptions */
@@ -151,7 +410,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
-                                  ELF_NVRREG32 * sizeof(vector128)))
+                                  ELF_NVRREG * sizeof(vector128)))
                        return 1;
                /* set MSR_VEC in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
@@ -169,6 +428,25 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
                return 1;
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
+       /* save spe registers */
+       if (current->thread.used_spe) {
+               flush_spe_to_thread(current);
+               if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
+                                  ELF_NEVRREG * sizeof(u32)))
+                       return 1;
+               /* set MSR_SPE in the saved MSR value to indicate that
+                  frame->mc_vregs contains valid data */
+               if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
+                       return 1;
+       }
+       /* else assert((regs->msr & MSR_SPE) == 0) */
+
+       /* We always copy to/from spefscr */
+       if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
+               return 1;
+#endif /* CONFIG_SPE */
+
        if (sigret) {
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -186,13 +464,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
  * (except for MSR).
  */
 static long restore_user_regs(struct pt_regs *regs,
-                             struct mcontext32 __user *sr, int sig)
+                             struct mcontext __user *sr, int sig)
 {
-       elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
-       int i;
-       long err = 0;
+       long err;
        unsigned int save_r2 = 0;
-#ifdef CONFIG_ALTIVEC
+#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
        unsigned long msr;
 #endif
 
@@ -202,11 +478,7 @@ static long restore_user_regs(struct pt_regs *regs,
         */
        if (!sig)
                save_r2 = (unsigned int)regs->gpr[2];
-       for (i = 0; i <= PT_RESULT; i++) {
-               if ((i == PT_MSR) || (i == PT_SOFTE))
-                       continue;
-               err |= __get_user(gregs[i], &sr->mc_gregs[i]);
-       }
+       err = restore_general_regs(regs, sr);
        if (!sig)
                regs->gpr[2] = (unsigned long) save_r2;
        if (err)
@@ -229,135 +501,51 @@ static long restore_user_regs(struct pt_regs *regs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
        } else if (current->thread.used_vr)
-               memset(current->thread.vr, 0, ELF_NVRREG32 * sizeof(vector128));
+               memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
 
        /* Always get VRSAVE back */
        if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
                return 1;
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
+       /* force the process to reload the spe registers from
+          current->thread when it next does spe instructions */
+       regs->msr &= ~MSR_SPE;
+       if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
+               /* restore spe registers from the stack */
+               if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
+                                    ELF_NEVRREG * sizeof(u32)))
+                       return 1;
+       } else if (current->thread.used_spe)
+               memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+
+       /* Always get SPEFSCR back */
+       if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
+               return 1;
+#endif /* CONFIG_SPE */
+
 #ifndef CONFIG_SMP
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
+#ifdef CONFIG_SPE
+       if (last_task_used_spe == current)
+               last_task_used_spe = NULL;
+#endif
        preempt_enable();
 #endif
        return 0;
 }
 
-
-/*
- *  Start of nonRT signal support
- *
- *     sigset_t is 32 bits for non-rt signals
- *
- *  System Calls
- *       sigaction                sys32_sigaction
- *       sigreturn                sys32_sigreturn
- *
- *  Note sigsuspend has no special 32 bit routine - uses the 64 bit routine
- *
- *  Other routines
- *        setup_frame32
- */
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-long sys32_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
-              struct pt_regs *regs)
-{
-       sigset_t saveset;
-
-       mask &= _BLOCKABLE;
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       siginitset(&current->blocked, mask);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal32(&saveset, regs))
-                       /*
-                        * Returning 0 means we return to userspace via
-                        * ret_from_except and thus restore all user
-                        * registers from *regs.  This is what we need
-                        * to do when a signal has been delivered.
-                        */
-                       return 0;
-       }
-}
-
-long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
-               struct old_sigaction32 __user *oact)
-{
-       struct k_sigaction new_ka, old_ka;
-       int ret;
-       
-       if (sig < 0)
-               sig = -sig;
-
-       if (act) {
-               compat_old_sigset_t mask;
-               compat_uptr_t handler, restorer;
-
-               if (get_user(handler, &act->sa_handler) ||
-                   __get_user(restorer, &act->sa_restorer) ||
-                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
-                   __get_user(mask, &act->sa_mask))
-                       return -EFAULT;
-               new_ka.sa.sa_handler = compat_ptr(handler);
-               new_ka.sa.sa_restorer = compat_ptr(restorer);
-               siginitset(&new_ka.sa.sa_mask, mask);
-       }
-
-       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-       if (!ret && oact) {
-               if (put_user((long)old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer) ||
-                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
-                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
-                       return -EFAULT;
-       }
-
-       return ret;
-}
-
-
-
-/*
- *  Start of RT signal support
- *
- *     sigset_t is 64 bits for rt signals
- *
- *  System Calls
- *       sigaction                sys32_rt_sigaction
- *       sigpending               sys32_rt_sigpending
- *       sigprocmask              sys32_rt_sigprocmask
- *       sigreturn                sys32_rt_sigreturn
- *       sigqueueinfo             sys32_rt_sigqueueinfo
- *       sigsuspend               sys32_rt_sigsuspend
- *
- *  Other routines
- *        setup_rt_frame32
- *        copy_siginfo_to_user32
- *        siginfo32to64
- */
-
-
-long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
+#ifdef CONFIG_PPC64
+long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
                struct sigaction32 __user *oact, size_t sigsetsize)
 {
        struct k_sigaction new_ka, old_ka;
        int ret;
-       compat_sigset_t set32;
 
        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(compat_sigset_t))
@@ -368,9 +556,7 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 
                ret = get_user(handler, &act->sa_handler);
                new_ka.sa.sa_handler = compat_ptr(handler);
-               ret |= __copy_from_user(&set32, &act->sa_mask,
-                                       sizeof(compat_sigset_t));
-               sigset_from_compat(&new_ka.sa.sa_mask, &set32);
+               ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
                ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                if (ret)
                        return -EFAULT;
@@ -378,10 +564,8 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 
        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
        if (!ret && oact) {
-               compat_from_sigset(&set32, &old_ka.sa.sa_mask);
                ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
-               ret |= __copy_to_user(&oact->sa_mask, &set32,
-                                     sizeof(compat_sigset_t));
+               ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
                ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
        }
        return ret;
@@ -394,41 +578,37 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
  * of a signed int (msr in 32-bit mode) and the register representation
  * of a signed int (msr in 64-bit mode) is performed.
  */
-long sys32_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
+long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
                compat_sigset_t __user *oset, size_t sigsetsize)
 {
        sigset_t s;
        sigset_t __user *up;
-       compat_sigset_t s32;
        int ret;
        mm_segment_t old_fs = get_fs();
 
        if (set) {
-               if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
-                       return -EFAULT;    
-               sigset_from_compat(&s, &s32);
+               if (get_sigset_t(&s, set))
+                       return -EFAULT;
        }
-       
+
        set_fs(KERNEL_DS);
        /* This is valid because of the set_fs() */
        up = (sigset_t __user *) &s;
        ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
-                                sigsetsize); 
+                                sigsetsize);
        set_fs(old_fs);
        if (ret)
                return ret;
        if (oset) {
-               compat_from_sigset(&s32, &s);
-               if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
+               if (put_sigset_t(oset, &s))
                        return -EFAULT;
        }
        return 0;
 }
 
-long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
+long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
 {
        sigset_t s;
-       compat_sigset_t s32;
        int ret;
        mm_segment_t old_fs = get_fs();
 
@@ -437,8 +617,7 @@ long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
        ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
        set_fs(old_fs);
        if (!ret) {
-               compat_from_sigset(&s32, &s);
-               if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
+               if (put_sigset_t(set, &s))
                        return -EFAULT;
        }
        return ret;
@@ -500,6 +679,8 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
        return err;
 }
 
+#define copy_siginfo_to_user   copy_siginfo_to_user32
+
 /*
  * Note: it is necessary to treat pid and sig as unsigned ints, with the
  * corresponding cast to a signed int to insure that the proper conversion
@@ -507,12 +688,12 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
  * (msr in 32-bit mode) and the register representation of a signed int
  * (msr in 64-bit mode) is performed.
  */
-long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
+long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
 {
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();
-       
+
        if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
            copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
                return -EFAULT;
@@ -522,58 +703,14 @@ long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
        set_fs (old_fs);
        return ret;
 }
-
-int sys32_rt_sigsuspend(compat_sigset_t __user * unewset, size_t sigsetsize, int p3,
-               int p4, int p6, int p7, struct pt_regs *regs)
-{
-       sigset_t saveset, newset;
-       compat_sigset_t s32;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&s32, unewset, sizeof(s32)))
-               return -EFAULT;
-
-       /*
-        * Swap the 2 words of the 64-bit sigset_t (they are stored
-        * in the "wrong" endian in 32-bit user storage).
-        */
-       sigset_from_compat(&newset, &s32);
-
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal32(&saveset, regs))
-                       /*
-                        * Returning 0 means we return to userspace via
-                        * ret_from_except and thus restore all user
-                        * registers from *regs.  This is what we need
-                        * to do when a signal has been delivered.
-                        */
-                       return 0;
-       }
-}
-
 /*
  *  Start Alternate signal stack support
  *
  *  System Calls
- *       sigaltatck               sys32_sigaltstack
+ *       sigaltatck               compat_sys_sigaltstack
  */
 
-int sys32_sigaltstack(u32 __new, u32 __old, int r5,
+int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
                      int r6, int r7, int r8, struct pt_regs *regs)
 {
        stack_32_t __user * newstack = (stack_32_t __user *)(long) __new;
@@ -615,76 +752,94 @@ int sys32_sigaltstack(u32 __new, u32 __old, int r5,
                return -EFAULT;
        return ret;
 }
+#endif /* CONFIG_PPC64 */
+
 
+/*
+ * Restore the user process's signal mask
+ */
+#ifdef CONFIG_PPC64
+extern void restore_sigmask(sigset_t *set);
+#else /* CONFIG_PPC64 */
+static void restore_sigmask(sigset_t *set)
+{
+       sigdelsetmask(set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = *set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+}
+#endif
 
 /*
  * Set up a signal frame for a "real-time" signal handler
  * (one which gets siginfo).
  */
-static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
-                             siginfo_t *info, sigset_t *oldset,
-                             struct pt_regs * regs, unsigned long newsp)
+static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
+               siginfo_t *info, sigset_t *oldset,
+               struct pt_regs *regs, unsigned long newsp)
 {
-       struct rt_sigframe32 __user *rt_sf;
-       struct mcontext32 __user *frame;
+       struct rt_sigframe __user *rt_sf;
+       struct mcontext __user *frame;
        unsigned long origsp = newsp;
-       compat_sigset_t c_oldset;
 
        /* Set up Signal Frame */
        /* Put a Real Time Context onto stack */
        newsp -= sizeof(*rt_sf);
-       rt_sf = (struct rt_sigframe32 __user *)newsp;
+       rt_sf = (struct rt_sigframe __user *)newsp;
 
        /* create a stack frame for the caller of the handler */
-       newsp -= __SIGNAL_FRAMESIZE32 + 16;
+       newsp -= __SIGNAL_FRAMESIZE + 16;
 
        if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
                goto badframe;
 
-       compat_from_sigset(&c_oldset, oldset);
-
        /* Put the siginfo & fill in most of the ucontext */
-       if (copy_siginfo_to_user32(&rt_sf->info, info)
+       if (copy_siginfo_to_user(&rt_sf->info, info)
            || __put_user(0, &rt_sf->uc.uc_flags)
            || __put_user(0, &rt_sf->uc.uc_link)
            || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
            || __put_user(sas_ss_flags(regs->gpr[1]),
                          &rt_sf->uc.uc_stack.ss_flags)
            || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
-           || __put_user((u32)(u64)&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
-           || __copy_to_user(&rt_sf->uc.uc_sigmask, &c_oldset, sizeof(c_oldset)))
+           || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
+                   &rt_sf->uc.uc_regs)
+           || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
                goto badframe;
 
        /* Save user registers on the stack */
        frame = &rt_sf->uc.uc_mcontext;
-       if (put_user(regs->gpr[1], (u32 __user *)newsp))
-               goto badframe;
-
+#ifdef CONFIG_PPC64
        if (vdso32_rt_sigtramp && current->thread.vdso_base) {
                if (save_user_regs(regs, frame, 0))
                        goto badframe;
                regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
-       } else {
+       } else
+#endif
+       {
                if (save_user_regs(regs, frame, __NR_rt_sigreturn))
                        goto badframe;
                regs->link = (unsigned long) frame->tramp;
        }
-       regs->gpr[1] = (unsigned long) newsp;
+       if (put_user(regs->gpr[1], (u32 __user *)newsp))
+               goto badframe;
+       regs->gpr[1] = newsp;
        regs->gpr[3] = sig;
        regs->gpr[4] = (unsigned long) &rt_sf->info;
        regs->gpr[5] = (unsigned long) &rt_sf->uc;
        regs->gpr[6] = (unsigned long) rt_sf;
        regs->nip = (unsigned long) ka->sa.sa_handler;
        regs->trap = 0;
+#ifdef CONFIG_PPC64
        regs->result = 0;
 
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
-
+#endif
        return 1;
 
 badframe:
-#if DEBUG_SIG
+#ifdef DEBUG_SIG
        printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
@@ -692,46 +847,50 @@ badframe:
        return 0;
 }
 
-static long do_setcontext32(struct ucontext32 __user *ucp, struct pt_regs *regs, int sig)
+static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
 {
-       compat_sigset_t c_set;
        sigset_t set;
-       u32 mcp;
+       struct mcontext __user *mcp;
+
+       if (get_sigset_t(&set, &ucp->uc_sigmask))
+               return -EFAULT;
+#ifdef CONFIG_PPC64
+       {
+               u32 cmcp;
 
-       if (__copy_from_user(&c_set, &ucp->uc_sigmask, sizeof(c_set))
-           || __get_user(mcp, &ucp->uc_regs))
+               if (__get_user(cmcp, &ucp->uc_regs))
+                       return -EFAULT;
+               mcp = (struct mcontext __user *)(u64)cmcp;
+       }
+#else
+       if (__get_user(mcp, &ucp->uc_regs))
                return -EFAULT;
-       sigset_from_compat(&set, &c_set);
+#endif
        restore_sigmask(&set);
-       if (restore_user_regs(regs, (struct mcontext32 __user *)(u64)mcp, sig))
+       if (restore_user_regs(regs, mcp, sig))
                return -EFAULT;
 
        return 0;
 }
 
-/*
- * Handle {get,set,swap}_context operations for 32 bits processes
- */
-
-long sys32_swapcontext(struct ucontext32 __user *old_ctx,
-                      struct ucontext32 __user *new_ctx,
+long sys_swapcontext(struct ucontext __user *old_ctx,
+                      struct ucontext __user *new_ctx,
                       int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
 {
        unsigned char tmp;
-       compat_sigset_t c_set;
 
        /* Context size is for future use. Right now, we only make sure
         * we are passed something we understand
         */
-       if (ctx_size < sizeof(struct ucontext32))
+       if (ctx_size < sizeof(struct ucontext))
                return -EINVAL;
 
        if (old_ctx != NULL) {
-               compat_from_sigset(&c_set, &current->blocked);
                if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
                    || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
-                   || __copy_to_user(&old_ctx->uc_sigmask, &c_set, sizeof(c_set))
-                   || __put_user((u32)(u64)&old_ctx->uc_mcontext, &old_ctx->uc_regs))
+                   || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
+                   || __put_user(to_user_ptr(&old_ctx->uc_mcontext),
+                           &old_ctx->uc_regs))
                        return -EFAULT;
        }
        if (new_ctx == NULL)
@@ -752,27 +911,26 @@ long sys32_swapcontext(struct ucontext32 __user *old_ctx,
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */
-       if (do_setcontext32(new_ctx, regs, 0))
+       if (do_setcontext(new_ctx, regs, 0))
                do_exit(SIGSEGV);
-
+       sigreturn_exit(regs);
+       /* doesn't actually return back to here */
        return 0;
 }
 
-long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
+long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                     struct pt_regs *regs)
 {
-       struct rt_sigframe32 __user *rt_sf;
-       int ret;
-
+       struct rt_sigframe __user *rt_sf;
 
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       rt_sf = (struct rt_sigframe32 __user *)
-               (regs->gpr[1] + __SIGNAL_FRAMESIZE32 + 16);
+       rt_sf = (struct rt_sigframe __user *)
+               (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
        if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
                goto bad;
-       if (do_setcontext32(&rt_sf->uc, regs, 1))
+       if (do_setcontext(&rt_sf->uc, regs, 1))
                goto bad;
 
        /*
@@ -781,62 +939,165 @@ long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
         * signal return.  But other architectures do this and we have
         * always done it up until now so it is probably better not to
         * change it.  -- paulus
-        * We use the sys32_ version that does the 32/64 bits conversion
+        */
+#ifdef CONFIG_PPC64
+       /*
+        * We use the compat_sys_ version that does the 32/64 bits conversion
         * and takes userland pointer directly. What about error checking ?
         * nobody does any...
         */
-               sys32_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
-
-       ret = regs->result;
-
-       return ret;
+       compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
+       return (int)regs->result;
+#else
+       do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
+       sigreturn_exit(regs);           /* doesn't return here */
+       return 0;
+#endif
 
  bad:
        force_sig(SIGSEGV, current);
        return 0;
 }
 
+#ifdef CONFIG_PPC32
+int sys_debug_setcontext(struct ucontext __user *ctx,
+                        int ndbg, struct sig_dbg_op __user *dbg,
+                        int r6, int r7, int r8,
+                        struct pt_regs *regs)
+{
+       struct sig_dbg_op op;
+       int i;
+       unsigned long new_msr = regs->msr;
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+       unsigned long new_dbcr0 = current->thread.dbcr0;
+#endif
+
+       for (i=0; i<ndbg; i++) {
+               if (__copy_from_user(&op, dbg, sizeof(op)))
+                       return -EFAULT;
+               switch (op.dbg_type) {
+               case SIG_DBG_SINGLE_STEPPING:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+                       if (op.dbg_value) {
+                               new_msr |= MSR_DE;
+                               new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
+                       } else {
+                               new_msr &= ~MSR_DE;
+                               new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
+                       }
+#else
+                       if (op.dbg_value)
+                               new_msr |= MSR_SE;
+                       else
+                               new_msr &= ~MSR_SE;
+#endif
+                       break;
+               case SIG_DBG_BRANCH_TRACING:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+                       return -EINVAL;
+#else
+                       if (op.dbg_value)
+                               new_msr |= MSR_BE;
+                       else
+                               new_msr &= ~MSR_BE;
+#endif
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       /* We wait until here to actually install the values in the
+          registers so if we fail in the above loop, it will not
+          affect the contents of these registers.  After this point,
+          failure is a problem, anyway, and it's very unlikely unless
+          the user is really doing something wrong. */
+       regs->msr = new_msr;
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+       current->thread.dbcr0 = new_dbcr0;
+#endif
+
+       /*
+        * If we get a fault copying the context into the kernel's
+        * image of the user's registers, we can't just return -EFAULT
+        * because the user's registers will be corrupted.  For instance
+        * the NIP value may have been updated but not some of the
+        * other registers.  Given that we have done the access_ok
+        * and successfully read the first and last bytes of the region
+        * above, this should only happen in an out-of-memory situation
+        * or if another thread unmaps the region containing the context.
+        * We kill the task with a SIGSEGV in this situation.
+        */
+       if (do_setcontext(ctx, regs, 1)) {
+               force_sig(SIGSEGV, current);
+               goto out;
+       }
+
+       /*
+        * It's not clear whether or why it is desirable to save the
+        * sigaltstack setting on signal delivery and restore it on
+        * signal return.  But other architectures do this and we have
+        * always done it up until now so it is probably better not to
+        * change it.  -- paulus
+        */
+       do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
+
+       sigreturn_exit(regs);
+       /* doesn't actually return back to here */
+
+ out:
+       return 0;
+}
+#endif
 
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal32(unsigned long sig, struct k_sigaction *ka,
-                           siginfo_t *info, sigset_t *oldset,
-                           struct pt_regs * regs, unsigned long newsp)
+static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+               siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
+               unsigned long newsp)
 {
-       struct sigcontext32 __user *sc;
-       struct sigregs32 __user *frame;
+       struct sigcontext __user *sc;
+       struct sigregs __user *frame;
        unsigned long origsp = newsp;
 
        /* Set up Signal Frame */
-       newsp -= sizeof(struct sigregs32);
-       frame = (struct sigregs32 __user *) newsp;
+       newsp -= sizeof(struct sigregs);
+       frame = (struct sigregs __user *) newsp;
 
        /* Put a sigcontext on the stack */
        newsp -= sizeof(*sc);
-       sc = (struct sigcontext32 __user *) newsp;
+       sc = (struct sigcontext __user *) newsp;
 
        /* create a stack frame for the caller of the handler */
-       newsp -= __SIGNAL_FRAMESIZE32;
+       newsp -= __SIGNAL_FRAMESIZE;
 
        if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
                goto badframe;
 
 #if _NSIG != 64
-#error "Please adjust handle_signal32()"
+#error "Please adjust handle_signal()"
 #endif
-       if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler)
+       if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
            || __put_user(oldset->sig[0], &sc->oldmask)
+#ifdef CONFIG_PPC64
            || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
-           || __put_user((u32)(u64)frame, &sc->regs)
+#else
+           || __put_user(oldset->sig[1], &sc->_unused[3])
+#endif
+           || __put_user(to_user_ptr(frame), &sc->regs)
            || __put_user(sig, &sc->signal))
                goto badframe;
 
+#ifdef CONFIG_PPC64
        if (vdso32_sigtramp && current->thread.vdso_base) {
                if (save_user_regs(regs, &frame->mctx, 0))
                        goto badframe;
                regs->link = current->thread.vdso_base + vdso32_sigtramp;
-       } else {
+       } else
+#endif
+       {
                if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
                        goto badframe;
                regs->link = (unsigned long) frame->mctx.tramp;
@@ -844,22 +1105,24 @@ static int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 
        if (put_user(regs->gpr[1], (u32 __user *)newsp))
                goto badframe;
-       regs->gpr[1] = (unsigned long) newsp;
+       regs->gpr[1] = newsp;
        regs->gpr[3] = sig;
        regs->gpr[4] = (unsigned long) sc;
        regs->nip = (unsigned long) ka->sa.sa_handler;
        regs->trap = 0;
+#ifdef CONFIG_PPC64
        regs->result = 0;
 
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
+#endif
 
        return 1;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in handle_signal, regs=%p frame=%x newsp=%x\n",
-              regs, frame, *newspp);
+#ifdef DEBUG_SIG
+       printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
+              regs, frame, newsp);
 #endif
        force_sigsegv(sig, current);
        return 0;
@@ -868,65 +1131,69 @@ badframe:
 /*
  * Do a signal return; undo the signal stack.
  */
-long sys32_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
+long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                       struct pt_regs *regs)
 {
-       struct sigcontext32 __user *sc;
-       struct sigcontext32 sigctx;
-       struct mcontext32 __user *sr;
+       struct sigcontext __user *sc;
+       struct sigcontext sigctx;
+       struct mcontext __user *sr;
        sigset_t set;
-       int ret;
 
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       sc = (struct sigcontext32 __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
+       sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
        if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
                goto badframe;
 
+#ifdef CONFIG_PPC64
        /*
         * Note that PPC32 puts the upper 32 bits of the sigmask in the
         * unused part of the signal stackframe
         */
        set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
+#else
+       set.sig[0] = sigctx.oldmask;
+       set.sig[1] = sigctx._unused[3];
+#endif
        restore_sigmask(&set);
 
-       sr = (struct mcontext32 __user *)(u64)sigctx.regs;
+       sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
        if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
            || restore_user_regs(regs, sr, 1))
                goto badframe;
 
-       ret = regs->result;
-       return ret;
+#ifdef CONFIG_PPC64
+       return (int)regs->result;
+#else
+       sigreturn_exit(regs);           /* doesn't return */
+       return 0;
+#endif
 
 badframe:
        force_sig(SIGSEGV, current);
        return 0;
 }
 
-
-
-/*
- *  Start of do_signal32 routine
- *
- *   This routine gets control when a pending signal needs to be processed
- *     in the 32 bit target thread -
- *
- *   It handles both rt and non-rt signals
- */
-
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-
-int do_signal32(sigset_t *oldset, struct pt_regs *regs)
+int do_signal(sigset_t *oldset, struct pt_regs *regs)
 {
        siginfo_t info;
+       struct k_sigaction ka;
        unsigned int frame, newsp;
        int signr, ret;
-       struct k_sigaction ka;
+
+#ifdef CONFIG_PPC32
+       if (try_to_freeze()) {
+               signr = 0;
+               if (!signal_pending(current))
+                       goto no_signal;
+       }
+#endif
 
        if (!oldset)
                oldset = &current->blocked;
@@ -934,7 +1201,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
        newsp = frame = 0;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-
+#ifdef CONFIG_PPC32
+no_signal:
+#endif
        if (TRAP(regs) == 0x0C00                /* System Call! */
            && regs->ccr & 0x10000000           /* error signalled */
            && ((ret = regs->gpr[3]) == ERESTARTSYS
@@ -964,12 +1233,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
                return 0;               /* no signals delivered */
 
        if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
-           && (!on_sig_stack(regs->gpr[1])))
-               newsp = (current->sas_ss_sp + current->sas_ss_size);
+           && !on_sig_stack(regs->gpr[1]))
+               newsp = current->sas_ss_sp + current->sas_ss_size;
        else
                newsp = regs->gpr[1];
        newsp &= ~0xfUL;
 
+#ifdef CONFIG_PPC64
        /*
         * Reenable the DABR before delivering the signal to
         * user space. The DABR will have been cleared if it
@@ -977,12 +1247,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
         */
        if (current->thread.dabr)
                set_dabr(current->thread.dabr);
+#endif
 
        /* Whee!  Actually deliver the signal.  */
        if (ka.sa.sa_flags & SA_SIGINFO)
-               ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp);
+               ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
        else
-               ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp);
+               ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);
 
        if (ret) {
                spin_lock_irq(&current->sighand->siglock);
similarity index 74%
rename from arch/ppc64/kernel/sys_ppc32.c
rename to arch/powerpc/kernel/sys_ppc32.c
index e93c134..a8210ed 100644 (file)
@@ -53,8 +53,7 @@
 #include <asm/time.h>
 #include <asm/mmu_context.h>
 #include <asm/systemcfg.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 /* readdir & getdents */
 #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
@@ -114,96 +113,6 @@ out:
        return error;
 }
 
-struct linux_dirent32 {
-       u32             d_ino;
-       u32             d_off;
-       unsigned short  d_reclen;
-       char            d_name[1];
-};
-
-struct getdents_callback32 {
-       struct linux_dirent32 __user * current_dir;
-       struct linux_dirent32 __user * previous;
-       int count;
-       int error;
-};
-
-static int filldir(void * __buf, const char * name, int namlen, off_t offset,
-                  ino_t ino, unsigned int d_type)
-{
-       struct linux_dirent32 __user * dirent;
-       struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
-       int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
-
-       buf->error = -EINVAL;   /* only used if we fail.. */
-       if (reclen > buf->count)
-               return -EINVAL;
-       dirent = buf->previous;
-       if (dirent) {
-               if (__put_user(offset, &dirent->d_off))
-                       goto efault;
-       }
-       dirent = buf->current_dir;
-       if (__put_user(ino, &dirent->d_ino))
-               goto efault;
-       if (__put_user(reclen, &dirent->d_reclen))
-               goto efault;
-       if (copy_to_user(dirent->d_name, name, namlen))
-               goto efault;
-       if (__put_user(0, dirent->d_name + namlen))
-               goto efault;
-       if (__put_user(d_type, (char __user *) dirent + reclen - 1))
-               goto efault;
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
-       buf->count -= reclen;
-       return 0;
-efault:
-       buf->error = -EFAULT;
-       return -EFAULT;
-}
-
-asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent,
-                   unsigned int count)
-{
-       struct file * file;
-       struct linux_dirent32 __user * lastdirent;
-       struct getdents_callback32 buf;
-       int error;
-
-       error = -EFAULT;
-       if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
-
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-
-       buf.current_dir = dirent;
-       buf.previous = NULL;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(file, (filldir_t)filldir, &buf);
-       if (error < 0)
-               goto out_putf;
-       error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
-               if (put_user(file->f_pos, &lastdirent->d_off))
-                       error = -EFAULT;
-               else
-                       error = count - buf.count;
-       }
-
-out_putf:
-       fput(file);
-out:
-       return error;
-}
-
 asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
                compat_ulong_t __user *outp, compat_ulong_t __user *exp,
                compat_uptr_t tvp_x)
@@ -248,7 +157,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sysfs(u32 option, u32 arg1, u32 arg2)
+asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
 {
        return sys_sysfs((int)option, arg1, arg2);
 }
@@ -270,7 +179,7 @@ struct timex32 {
 extern int do_adjtimex(struct timex *);
 extern void ppc_adjtimex(void);
 
-asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
+asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
 {
        struct timex txc;
        int ret;
@@ -329,7 +238,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
        return ret;
 }
 
-asmlinkage long sys32_pause(void)
+asmlinkage long compat_sys_pause(void)
 {
        current->state = TASK_INTERRUPTIBLE;
        schedule();
@@ -375,7 +284,7 @@ struct sysinfo32 {
        char _f[20-2*sizeof(int)-sizeof(int)];
 };
 
-asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
+asmlinkage long compat_sys_sysinfo(struct sysinfo32 __user *info)
 {
        struct sysinfo s;
        int ret, err;
@@ -432,7 +341,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
    sorts of things, like timeval and itimerval.  */
 extern struct timezone sys_tz;
 
-asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
+asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
 {
        if (tv) {
                struct timeval ktv;
@@ -450,7 +359,7 @@ asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct time
 
 
 
-asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
+asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
 {
        struct timespec kts;
        struct timezone ktz;
@@ -468,7 +377,7 @@ asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct time
 }
 
 #ifdef CONFIG_SYSVIPC
-long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
+long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
               u32 fifth)
 {
        int version;
@@ -539,7 +448,7 @@ long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
+asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
 {
        mm_segment_t old_fs = get_fs();
        int ret;
@@ -561,7 +470,7 @@ asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offs
        return ret;
 }
 
-asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
+asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
 {
        mm_segment_t old_fs = get_fs();
        int ret;
@@ -583,7 +492,7 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off
        return ret;
 }
 
-long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
                  unsigned long a3, unsigned long a4, unsigned long a5,
                  struct pt_regs *regs)
 {
@@ -610,58 +519,12 @@ out:
        return error;
 }
 
-/* Set up a thread for executing a new program. */
-void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
-{
-       set_fs(USER_DS);
-
-       /*
-        * If we exec out of a kernel thread then thread.regs will not be
-        * set. Do it now.
-        */
-       if (!current->thread.regs) {
-               unsigned long childregs = (unsigned long)current->thread_info +
-                                               THREAD_SIZE;
-               childregs -= sizeof(struct pt_regs);
-               current->thread.regs = (struct pt_regs *)childregs;
-       }
-
-       /*
-        * ELF_PLAT_INIT already clears all registers but it also sets r2.
-        * So just clear r2 here.
-        */
-       regs->gpr[2] = 0;
-
-       regs->nip = nip;
-       regs->gpr[1] = sp;
-       regs->msr = MSR_USER32;
-#ifndef CONFIG_SMP
-       if (last_task_used_math == current)
-               last_task_used_math = 0;
-#endif /* CONFIG_SMP */
-       current->thread.fpscr = 0;
-       memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-#ifdef CONFIG_ALTIVEC
-#ifndef CONFIG_SMP
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = 0;
-#endif /* CONFIG_SMP */
-       memset(current->thread.vr, 0, sizeof(current->thread.vr));
-       current->thread.vscr.u[0] = 0;
-       current->thread.vscr.u[1] = 0;
-       current->thread.vscr.u[2] = 0;
-       current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
-       current->thread.vrsave = 0;
-       current->thread.used_vr = 0;
-#endif /* CONFIG_ALTIVEC */
-}
-
 /* Note: it is necessary to treat option as an unsigned int, 
  * with the corresponding cast to a signed int to insure that the 
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
 {
        return sys_prctl((int)option,
                         (unsigned long) arg2,
@@ -675,7 +538,7 @@ asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
+asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
 {
        struct timespec t;
        int ret;
@@ -690,7 +553,7 @@ asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __us
        return ret;
 }
 
-asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
+asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
 {
        return sys_pciconfig_read((unsigned long) bus,
                                  (unsigned long) dfn,
@@ -699,7 +562,7 @@ asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf
                                  compat_ptr(ubuf));
 }
 
-asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
+asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
 {
        return sys_pciconfig_write((unsigned long) bus,
                                   (unsigned long) dfn,
@@ -708,7 +571,7 @@ asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubu
                                   compat_ptr(ubuf));
 }
 
-asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
+asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
 {
        return sys_pciconfig_iobase(which, in_bus, in_devfn);
 }
@@ -719,7 +582,7 @@ asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_access(const char __user * filename, u32 mode)
+asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
 {
        return sys_access(filename, (int)mode);
 }
@@ -730,7 +593,7 @@ asmlinkage long sys32_access(const char __user * filename, u32 mode)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_creat(const char __user * pathname, u32 mode)
+asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
 {
        return sys_creat(pathname, (int)mode);
 }
@@ -741,7 +604,7 @@ asmlinkage long sys32_creat(const char __user * pathname, u32 mode)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
+asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
 {
        return sys_waitpid((int)pid, stat_addr, (int)options);
 }
@@ -752,7 +615,7 @@ asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 opti
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist)
+asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
 {
        return sys_getgroups((int)gidsetsize, grouplist);
 }
@@ -763,7 +626,7 @@ asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_getpgid(u32 pid)
+asmlinkage long compat_sys_getpgid(u32 pid)
 {
        return sys_getpgid((int)pid);
 }
@@ -775,7 +638,7 @@ asmlinkage long sys32_getpgid(u32 pid)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_getsid(u32 pid)
+asmlinkage long compat_sys_getsid(u32 pid)
 {
        return sys_getsid((int)pid);
 }
@@ -786,7 +649,7 @@ asmlinkage long sys32_getsid(u32 pid)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_kill(u32 pid, u32 sig)
+asmlinkage long compat_sys_kill(u32 pid, u32 sig)
 {
        return sys_kill((int)pid, (int)sig);
 }
@@ -797,12 +660,12 @@ asmlinkage long sys32_kill(u32 pid, u32 sig)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_mkdir(const char __user * pathname, u32 mode)
+asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
 {
        return sys_mkdir(pathname, (int)mode);
 }
 
-long sys32_nice(u32 increment)
+long compat_sys_nice(u32 increment)
 {
        /* sign extend increment */
        return sys_nice((int)increment);
@@ -819,7 +682,7 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32 bufsiz)
+asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
 {
        return sys_readlink(path, buf, (int)bufsiz);
 }
@@ -829,7 +692,7 @@ asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_get_priority_max(u32 policy)
+asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
 {
        return sys_sched_get_priority_max((int)policy);
 }
@@ -840,7 +703,7 @@ asmlinkage long sys32_sched_get_priority_max(u32 policy)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_get_priority_min(u32 policy)
+asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
 {
        return sys_sched_get_priority_min((int)policy);
 }
@@ -851,7 +714,7 @@ asmlinkage long sys32_sched_get_priority_min(u32 policy)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param)
+asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
 {
        return sys_sched_getparam((int)pid, param);
 }
@@ -862,7 +725,7 @@ asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_getscheduler(u32 pid)
+asmlinkage long compat_sys_sched_getscheduler(u32 pid)
 {
        return sys_sched_getscheduler((int)pid);
 }
@@ -873,7 +736,7 @@ asmlinkage long sys32_sched_getscheduler(u32 pid)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param)
+asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
 {
        return sys_sched_setparam((int)pid, param);
 }
@@ -884,7 +747,7 @@ asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
+asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
 {
        return sys_sched_setscheduler((int)pid, (int)policy, param);
 }
@@ -895,7 +758,7 @@ asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_setdomainname(char __user *name, u32 len)
+asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
 {
        return sys_setdomainname(name, (int)len);
 }
@@ -906,13 +769,13 @@ asmlinkage long sys32_setdomainname(char __user *name, u32 len)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_setgroups(u32 gidsetsize, gid_t __user *grouplist)
+asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
 {
        return sys_setgroups((int)gidsetsize, grouplist);
 }
 
 
-asmlinkage long sys32_sethostname(char __user *name, u32 len)
+asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
 {
        /* sign extend len */
        return sys_sethostname(name, (int)len);
@@ -924,30 +787,30 @@ asmlinkage long sys32_sethostname(char __user *name, u32 len)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_setpgid(u32 pid, u32 pgid)
+asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
 {
        return sys_setpgid((int)pid, (int)pgid);
 }
 
-long sys32_getpriority(u32 which, u32 who)
+long compat_sys_getpriority(u32 which, u32 who)
 {
        /* sign extend which and who */
        return sys_getpriority((int)which, (int)who);
 }
 
-long sys32_setpriority(u32 which, u32 who, u32 niceval)
+long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
 {
        /* sign extend which, who and niceval */
        return sys_setpriority((int)which, (int)who, (int)niceval);
 }
 
-long sys32_ioprio_get(u32 which, u32 who)
+long compat_sys_ioprio_get(u32 which, u32 who)
 {
        /* sign extend which and who */
        return sys_ioprio_get((int)which, (int)who);
 }
 
-long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
+long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
 {
        /* sign extend which, who and ioprio */
        return sys_ioprio_set((int)which, (int)who, (int)ioprio);
@@ -958,12 +821,12 @@ long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_ssetmask(u32 newmask)
+asmlinkage long compat_sys_ssetmask(u32 newmask)
 {
        return sys_ssetmask((int) newmask);
 }
 
-asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len)
+asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
 {
        /* sign extend len */
        return sys_syslog(type, buf, (int)len);
@@ -975,7 +838,7 @@ asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len)
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long sys32_umask(u32 mask)
+asmlinkage long compat_sys_umask(u32 mask)
 {
        return sys_umask((int)mask);
 }
@@ -991,7 +854,7 @@ struct __sysctl_args32 {
        u32 __unused[4];
 };
 
-asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
+asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
 {
        struct __sysctl_args32 tmp;
        int error;
@@ -1032,55 +895,7 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
 }
 #endif
 
-asmlinkage int sys32_uname(struct old_utsname __user * name)
-{
-       int err = 0;
-       
-       down_read(&uts_sem);
-       if (copy_to_user(name, &system_utsname, sizeof(*name)))
-               err = -EFAULT;
-       up_read(&uts_sem);
-       if (!err && personality(current->personality) == PER_LINUX32) {
-               /* change "ppc64" to "ppc" */
-               if (__put_user(0, name->machine + 3)
-                   || __put_user(0, name->machine + 4))
-                       err = -EFAULT;
-       }
-       return err;
-}
-
-asmlinkage int sys32_olduname(struct oldold_utsname __user * name)
-{
-       int error;
-
-       if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
-               return -EFAULT;
-  
-       down_read(&uts_sem);
-       error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
-       error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
-       error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
-       error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
-       error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
-       error |= __put_user(0,name->release+__OLD_UTS_LEN);
-       error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
-       error |= __put_user(0,name->version+__OLD_UTS_LEN);
-       error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
-       error |= __put_user(0,name->machine+__OLD_UTS_LEN);
-       if (personality(current->personality) == PER_LINUX32) {
-               /* change "ppc64" to "ppc" */
-               error |= __put_user(0, name->machine + 3);
-               error |= __put_user(0, name->machine + 4);
-       }
-       
-       up_read(&uts_sem);
-
-       error = error ? -EFAULT : 0;
-       
-       return error;
-}
-
-unsigned long sys32_mmap2(unsigned long addr, size_t len,
+unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
                          unsigned long prot, unsigned long flags,
                          unsigned long fd, unsigned long pgoff)
 {
@@ -1088,29 +903,7 @@ unsigned long sys32_mmap2(unsigned long addr, size_t len,
        return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
 }
 
-int get_compat_timeval(struct timeval *tv, struct compat_timeval __user *ctv)
-{
-       return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
-               __get_user(tv->tv_sec, &ctv->tv_sec) ||
-               __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
-}
-
-asmlinkage long sys32_utimes(char __user *filename, struct compat_timeval __user *tvs)
-{
-       struct timeval ktvs[2], *ptr;
-
-       ptr = NULL;
-       if (tvs) {
-               if (get_compat_timeval(&ktvs[0], &tvs[0]) ||
-                   get_compat_timeval(&ktvs[1], &tvs[1]))
-                       return -EFAULT;
-               ptr = ktvs;
-       }
-
-       return do_utimes(filename, ptr);
-}
-
-long sys32_tgkill(u32 tgid, u32 pid, int sig)
+long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
 {
        /* sign extend tgid, pid */
        return sys_tgkill((int)tgid, (int)pid, sig);
@@ -1121,30 +914,30 @@ long sys32_tgkill(u32 tgid, u32 pid, int sig)
  * The 32 bit ABI passes long longs in an odd even register pair.
  */
 
-compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
+compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
                             u32 reg6, u32 poshi, u32 poslo)
 {
        return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
 }
 
-compat_ssize_t sys32_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
+compat_ssize_t compat_sys_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
                              u32 reg6, u32 poshi, u32 poslo)
 {
        return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
 }
 
-compat_ssize_t sys32_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
+compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
 {
        return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
 }
 
-asmlinkage int sys32_truncate64(const char __user * path, u32 reg4,
+asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
                                unsigned long high, unsigned long low)
 {
        return sys_truncate(path, (high << 32) | low);
 }
 
-asmlinkage int sys32_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
+asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
                                 unsigned long low)
 {
        return sys_ftruncate(fd, (high << 32) | low);
@@ -1164,13 +957,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
                             advice);
 }
 
-long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
-                       u32 len_high, u32 len_low)
-{
-       return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
-                            (u64)len_high << 32 | len_low, advice);
-}
-
 long ppc32_timer_create(clockid_t clock,
                        struct compat_sigevent __user *ev32,
                        timer_t __user *timer_id)
@@ -1203,7 +989,7 @@ long ppc32_timer_create(clockid_t clock,
        return err;
 }
 
-asmlinkage long sys32_add_key(const char __user *_type,
+asmlinkage long compat_sys_add_key(const char __user *_type,
                              const char __user *_description,
                              const void __user *_payload,
                              u32 plen,
@@ -1212,7 +998,7 @@ asmlinkage long sys32_add_key(const char __user *_type,
        return sys_add_key(_type, _description, _payload, plen, ringid);
 }
 
-asmlinkage long sys32_request_key(const char __user *_type,
+asmlinkage long compat_sys_request_key(const char __user *_type,
                                  const char __user *_description,
                                  const char __user *_callout_info,
                                  u32 destringid)
similarity index 54%
rename from arch/ppc64/kernel/syscalls.c
rename to arch/powerpc/kernel/syscalls.c
index 05f1663..f72ced1 100644 (file)
@@ -1,7 +1,6 @@
 /*
- * linux/arch/ppc64/kernel/sys_ppc.c
+ *  Implementation of various system calls for Linux/PowerPC
  *
- *  PowerPC version 
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
  * Derived from "arch/i386/kernel/sys_i386.c"
@@ -52,9 +51,8 @@ extern unsigned long wall_jiffies;
  *
  * This is really horribly ugly.
  */
-asmlinkage int 
-sys_ipc (uint call, int first, unsigned long second, long third,
-        void __user *ptr, long fifth)
+int sys_ipc(uint call, int first, unsigned long second, long third,
+           void __user *ptr, long fifth)
 {
        int version, ret;
 
@@ -88,7 +86,7 @@ sys_ipc (uint call, int first, unsigned long second, long third,
        }
        case MSGSND:
                ret = sys_msgsnd(first, (struct msgbuf __user *)ptr,
-                                 (size_t)second, third);
+                                (size_t)second, third);
                break;
        case MSGRCV:
                switch (version) {
@@ -113,41 +111,29 @@ sys_ipc (uint call, int first, unsigned long second, long third,
                }
                break;
        case MSGGET:
-               ret = sys_msgget ((key_t)first, (int)second);
+               ret = sys_msgget((key_t)first, (int)second);
                break;
        case MSGCTL:
                ret = sys_msgctl(first, (int)second,
                                  (struct msqid_ds __user *)ptr);
                break;
-       case SHMAT:
-               switch (version) {
-               default: {
-                       ulong raddr;
-                       ret = do_shmat(first, (char __user *) ptr,
-                                       (int)second, &raddr);
-                       if (ret)
-                               break;
-                       ret = put_user (raddr, (ulong __user *) third);
-                       break;
-               }
-               case 1: /* iBCS2 emulator entry point */
-                       ret = -EINVAL;
-                       if (!segment_eq(get_fs(), get_ds()))
-                               break;
-                       ret = do_shmat(first, (char __user *)ptr,
-                                       (int)second, (ulong *)third);
+       case SHMAT: {
+               ulong raddr;
+               ret = do_shmat(first, (char __user *)ptr, (int)second, &raddr);
+               if (ret)
                        break;
-               }
+               ret = put_user(raddr, (ulong __user *) third);
                break;
-       case SHMDT: 
-               ret = sys_shmdt ((char __user *)ptr);
+       }
+       case SHMDT:
+               ret = sys_shmdt((char __user *)ptr);
                break;
        case SHMGET:
-               ret = sys_shmget (first, (size_t)second, third);
+               ret = sys_shmget(first, (size_t)second, third);
                break;
        case SHMCTL:
                ret = sys_shmctl(first, (int)second,
-                                 (struct shmid_ds __user *)ptr);
+                                (struct shmid_ds __user *)ptr);
                break;
        }
 
@@ -158,43 +144,89 @@ sys_ipc (uint call, int first, unsigned long second, long third,
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way unix traditionally does this, though.
  */
-asmlinkage int sys_pipe(int __user *fildes)
+int sys_pipe(int __user *fildes)
 {
        int fd[2];
        int error;
-       
+
        error = do_pipe(fd);
        if (!error) {
                if (copy_to_user(fildes, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
-       
        return error;
 }
 
-unsigned long sys_mmap(unsigned long addr, size_t len,
-                      unsigned long prot, unsigned long flags,
-                      unsigned long fd, off_t offset)
+static inline unsigned long do_mmap2(unsigned long addr, size_t len,
+                       unsigned long prot, unsigned long flags,
+                       unsigned long fd, unsigned long off, int shift)
 {
        struct file * file = NULL;
-       unsigned long ret = -EBADF;
+       unsigned long ret = -EINVAL;
 
+       if (shift) {
+               if (off & ((1 << shift) - 1))
+                       goto out;
+               off >>= shift;
+       }
+               
+       ret = -EBADF;
        if (!(flags & MAP_ANONYMOUS)) {
                if (!(file = fget(fd)))
                        goto out;
        }
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
        down_write(&current->mm->mmap_sem);
-       ret = do_mmap(file, addr, len, prot, flags, offset);
+       ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
        up_write(&current->mm->mmap_sem);
        if (file)
                fput(file);
-
 out:
        return ret;
 }
 
+unsigned long sys_mmap2(unsigned long addr, size_t len,
+                       unsigned long prot, unsigned long flags,
+                       unsigned long fd, unsigned long pgoff)
+{
+       return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
+}
+
+unsigned long sys_mmap(unsigned long addr, size_t len,
+                      unsigned long prot, unsigned long flags,
+                      unsigned long fd, off_t offset)
+{
+       return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
+}
+
+#ifdef CONFIG_PPC32
+/*
+ * Due to some executables calling the wrong select we sometimes
+ * get wrong args.  This determines how the args are being passed
+ * (a single ptr to them all args passed) then calls
+ * sys_select() with the appropriate args. -- Cort
+ */
+int
+ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
+{
+       if ( (unsigned long)n >= 4096 )
+       {
+               unsigned long __user *buffer = (unsigned long __user *)n;
+               if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
+                   || __get_user(n, buffer)
+                   || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
+                   || __get_user(outp, ((fd_set  __user * __user *)(buffer+2)))
+                   || __get_user(exp, ((fd_set  __user * __user *)(buffer+3)))
+                   || __get_user(tvp, ((struct timeval  __user * __user *)(buffer+4))))
+                       return -EFAULT;
+       }
+       return sys_select(n, inp, outp, exp, tvp);
+}
+#endif
+
+#ifdef CONFIG_PPC64
 long ppc64_personality(unsigned long personality)
 {
        long ret;
@@ -207,8 +239,25 @@ long ppc64_personality(unsigned long personality)
                ret = PER_LINUX;
        return ret;
 }
+#endif
+
+#ifdef CONFIG_PPC64
+#define OVERRIDE_MACHINE    (personality(current->personality) == PER_LINUX32)
+#else
+#define OVERRIDE_MACHINE    0
+#endif
+
+static inline int override_machine(char *mach)
+{
+       if (OVERRIDE_MACHINE) {
+               /* change ppc64 to ppc */
+               if (__put_user(0, mach+3) || __put_user(0, mach+4))
+                       return -EFAULT;
+       }
+       return 0;
+}
 
-long ppc64_newuname(struct new_utsname __user * name)
+long ppc_newuname(struct new_utsname __user * name)
 {
        int err = 0;
 
@@ -216,16 +265,54 @@ long ppc64_newuname(struct new_utsname __user * name)
        if (copy_to_user(name, &system_utsname, sizeof(*name)))
                err = -EFAULT;
        up_read(&uts_sem);
-       if (!err && personality(current->personality) == PER_LINUX32) {
-               /* change ppc64 to ppc */
-               if (__put_user(0, name->machine + 3)
-                   || __put_user(0, name->machine + 4))
-                       err = -EFAULT;
-       }
+       if (!err)
+               err = override_machine(name->machine);
        return err;
 }
 
-asmlinkage time_t sys64_time(time_t __user * tloc)
+int sys_uname(struct old_utsname __user *name)
+{
+       int err = 0;
+       
+       down_read(&uts_sem);
+       if (copy_to_user(name, &system_utsname, sizeof(*name)))
+               err = -EFAULT;
+       up_read(&uts_sem);
+       if (!err)
+               err = override_machine(name->machine);
+       return err;
+}
+
+int sys_olduname(struct oldold_utsname __user *name)
+{
+       int error;
+
+       if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
+               return -EFAULT;
+  
+       down_read(&uts_sem);
+       error = __copy_to_user(&name->sysname, &system_utsname.sysname,
+                              __OLD_UTS_LEN);
+       error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+       error |= __copy_to_user(&name->nodename, &system_utsname.nodename,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+       error |= __copy_to_user(&name->release, &system_utsname.release,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->release + __OLD_UTS_LEN);
+       error |= __copy_to_user(&name->version, &system_utsname.version,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->version + __OLD_UTS_LEN);
+       error |= __copy_to_user(&name->machine, &system_utsname.machine,
+                               __OLD_UTS_LEN);
+       error |= override_machine(name->machine);
+       up_read(&uts_sem);
+
+       return error? -EFAULT: 0;
+}
+
+#ifdef CONFIG_PPC64
+time_t sys64_time(time_t __user * tloc)
 {
        time_t secs;
        time_t usecs;
@@ -247,6 +334,14 @@ asmlinkage time_t sys64_time(time_t __user * tloc)
 
        return secs;
 }
+#endif
+
+long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
+                     u32 len_high, u32 len_low)
+{
+       return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
+                            (u64)len_high << 32 | len_low, advice);
+}
 
 void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
                     unsigned long r6, unsigned long r7, unsigned long r8,
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
new file mode 100644 (file)
index 0000000..65eaea9
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+ * This file contains the table of syscall-handling functions.
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/ppc_asm.h>
+
+#ifdef CONFIG_PPC64
+#define SYSCALL(func)          .llong  .sys_##func,.sys_##func
+#define COMPAT_SYS(func)       .llong  .sys_##func,.compat_sys_##func
+#define PPC_SYS(func)          .llong  .ppc_##func,.ppc_##func
+#define OLDSYS(func)           .llong  .sys_ni_syscall,.sys_ni_syscall
+#define SYS32ONLY(func)                .llong  .sys_ni_syscall,.compat_sys_##func
+#define SYSX(f, f3264, f32)    .llong  .f,.f3264
+#else
+#define SYSCALL(func)          .long   sys_##func
+#define COMPAT_SYS(func)       .long   sys_##func
+#define PPC_SYS(func)          .long   ppc_##func
+#define OLDSYS(func)           .long   sys_##func
+#define SYS32ONLY(func)                .long   sys_##func
+#define SYSX(f, f3264, f32)    .long   f32
+#endif
+
+#ifdef CONFIG_PPC64
+#define sys_sigpending sys_ni_syscall
+#define sys_old_getrlimit sys_ni_syscall
+#else
+#define ppc_rtas       sys_ni_syscall
+#endif
+
+_GLOBAL(sys_call_table)
+SYSCALL(restart_syscall)
+SYSCALL(exit)
+PPC_SYS(fork)
+SYSCALL(read)
+SYSCALL(write)
+COMPAT_SYS(open)
+SYSCALL(close)
+COMPAT_SYS(waitpid)
+COMPAT_SYS(creat)
+SYSCALL(link)
+SYSCALL(unlink)
+COMPAT_SYS(execve)
+SYSCALL(chdir)
+SYSX(sys64_time,compat_sys_time,sys_time)
+SYSCALL(mknod)
+SYSCALL(chmod)
+SYSCALL(lchown)
+SYSCALL(ni_syscall)
+OLDSYS(stat)
+SYSX(sys_lseek,ppc32_lseek,sys_lseek)
+SYSCALL(getpid)
+COMPAT_SYS(mount)
+SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
+SYSCALL(setuid)
+SYSCALL(getuid)
+COMPAT_SYS(stime)
+COMPAT_SYS(ptrace)
+SYSCALL(alarm)
+OLDSYS(fstat)
+COMPAT_SYS(pause)
+COMPAT_SYS(utime)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS(access)
+COMPAT_SYS(nice)
+SYSCALL(ni_syscall)
+SYSCALL(sync)
+COMPAT_SYS(kill)
+SYSCALL(rename)
+COMPAT_SYS(mkdir)
+SYSCALL(rmdir)
+SYSCALL(dup)
+SYSCALL(pipe)
+COMPAT_SYS(times)
+SYSCALL(ni_syscall)
+SYSCALL(brk)
+SYSCALL(setgid)
+SYSCALL(getgid)
+SYSCALL(signal)
+SYSCALL(geteuid)
+SYSCALL(getegid)
+SYSCALL(acct)
+SYSCALL(umount)
+SYSCALL(ni_syscall)
+COMPAT_SYS(ioctl)
+COMPAT_SYS(fcntl)
+SYSCALL(ni_syscall)
+COMPAT_SYS(setpgid)
+SYSCALL(ni_syscall)
+SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
+COMPAT_SYS(umask)
+SYSCALL(chroot)
+SYSCALL(ustat)
+SYSCALL(dup2)
+SYSCALL(getppid)
+SYSCALL(getpgrp)
+SYSCALL(setsid)
+SYS32ONLY(sigaction)
+SYSCALL(sgetmask)
+COMPAT_SYS(ssetmask)
+SYSCALL(setreuid)
+SYSCALL(setregid)
+SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
+COMPAT_SYS(sigpending)
+COMPAT_SYS(sethostname)
+COMPAT_SYS(setrlimit)
+COMPAT_SYS(old_getrlimit)
+COMPAT_SYS(getrusage)
+COMPAT_SYS(gettimeofday)
+COMPAT_SYS(settimeofday)
+COMPAT_SYS(getgroups)
+COMPAT_SYS(setgroups)
+SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
+SYSCALL(symlink)
+OLDSYS(lstat)
+COMPAT_SYS(readlink)
+SYSCALL(uselib)
+SYSCALL(swapon)
+SYSCALL(reboot)
+SYSX(sys_ni_syscall,old32_readdir,old_readdir)
+SYSCALL(mmap)
+SYSCALL(munmap)
+SYSCALL(truncate)
+SYSCALL(ftruncate)
+SYSCALL(fchmod)
+SYSCALL(fchown)
+COMPAT_SYS(getpriority)
+COMPAT_SYS(setpriority)
+SYSCALL(ni_syscall)
+COMPAT_SYS(statfs)
+COMPAT_SYS(fstatfs)
+SYSCALL(ni_syscall)
+COMPAT_SYS(socketcall)
+COMPAT_SYS(syslog)
+COMPAT_SYS(setitimer)
+COMPAT_SYS(getitimer)
+COMPAT_SYS(newstat)
+COMPAT_SYS(newlstat)
+COMPAT_SYS(newfstat)
+SYSX(sys_ni_syscall,sys_uname,sys_uname)
+SYSCALL(ni_syscall)
+SYSCALL(vhangup)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS(wait4)
+SYSCALL(swapoff)
+COMPAT_SYS(sysinfo)
+COMPAT_SYS(ipc)
+SYSCALL(fsync)
+SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
+PPC_SYS(clone)
+COMPAT_SYS(setdomainname)
+PPC_SYS(newuname)
+SYSCALL(ni_syscall)
+COMPAT_SYS(adjtimex)
+SYSCALL(mprotect)
+SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
+SYSCALL(ni_syscall)
+SYSCALL(init_module)
+SYSCALL(delete_module)
+SYSCALL(ni_syscall)
+SYSCALL(quotactl)
+COMPAT_SYS(getpgid)
+SYSCALL(fchdir)
+SYSCALL(bdflush)
+COMPAT_SYS(sysfs)
+SYSX(ppc64_personality,ppc64_personality,sys_personality)
+SYSCALL(ni_syscall)
+SYSCALL(setfsuid)
+SYSCALL(setfsgid)
+SYSCALL(llseek)
+COMPAT_SYS(getdents)
+SYSX(sys_select,ppc32_select,ppc_select)
+SYSCALL(flock)
+SYSCALL(msync)
+COMPAT_SYS(readv)
+COMPAT_SYS(writev)
+COMPAT_SYS(getsid)
+SYSCALL(fdatasync)
+COMPAT_SYS(sysctl)
+SYSCALL(mlock)
+SYSCALL(munlock)
+SYSCALL(mlockall)
+SYSCALL(munlockall)
+COMPAT_SYS(sched_setparam)
+COMPAT_SYS(sched_getparam)
+COMPAT_SYS(sched_setscheduler)
+COMPAT_SYS(sched_getscheduler)
+SYSCALL(sched_yield)
+COMPAT_SYS(sched_get_priority_max)
+COMPAT_SYS(sched_get_priority_min)
+COMPAT_SYS(sched_rr_get_interval)
+COMPAT_SYS(nanosleep)
+SYSCALL(mremap)
+SYSCALL(setresuid)
+SYSCALL(getresuid)
+SYSCALL(ni_syscall)
+SYSCALL(poll)
+COMPAT_SYS(nfsservctl)
+SYSCALL(setresgid)
+SYSCALL(getresgid)
+COMPAT_SYS(prctl)
+SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
+COMPAT_SYS(rt_sigaction)
+COMPAT_SYS(rt_sigprocmask)
+COMPAT_SYS(rt_sigpending)
+COMPAT_SYS(rt_sigtimedwait)
+COMPAT_SYS(rt_sigqueueinfo)
+SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
+COMPAT_SYS(pread64)
+COMPAT_SYS(pwrite64)
+SYSCALL(chown)
+SYSCALL(getcwd)
+SYSCALL(capget)
+SYSCALL(capset)
+COMPAT_SYS(sigaltstack)
+SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+PPC_SYS(vfork)
+COMPAT_SYS(getrlimit)
+COMPAT_SYS(readahead)
+SYS32ONLY(mmap2)
+SYS32ONLY(truncate64)
+SYS32ONLY(ftruncate64)
+SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
+SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
+SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
+COMPAT_SYS(pciconfig_read)
+COMPAT_SYS(pciconfig_write)
+COMPAT_SYS(pciconfig_iobase)
+SYSCALL(ni_syscall)
+SYSCALL(getdents64)
+SYSCALL(pivot_root)
+SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
+SYSCALL(madvise)
+SYSCALL(mincore)
+SYSCALL(gettid)
+SYSCALL(tkill)
+SYSCALL(setxattr)
+SYSCALL(lsetxattr)
+SYSCALL(fsetxattr)
+SYSCALL(getxattr)
+SYSCALL(lgetxattr)
+SYSCALL(fgetxattr)
+SYSCALL(listxattr)
+SYSCALL(llistxattr)
+SYSCALL(flistxattr)
+SYSCALL(removexattr)
+SYSCALL(lremovexattr)
+SYSCALL(fremovexattr)
+COMPAT_SYS(futex)
+COMPAT_SYS(sched_setaffinity)
+COMPAT_SYS(sched_getaffinity)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYS32ONLY(sendfile64)
+COMPAT_SYS(io_setup)
+SYSCALL(io_destroy)
+COMPAT_SYS(io_getevents)
+COMPAT_SYS(io_submit)
+SYSCALL(io_cancel)
+SYSCALL(set_tid_address)
+SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
+SYSCALL(exit_group)
+SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+SYSCALL(epoll_create)
+SYSCALL(epoll_ctl)
+SYSCALL(epoll_wait)
+SYSCALL(remap_file_pages)
+SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
+COMPAT_SYS(timer_settime)
+COMPAT_SYS(timer_gettime)
+SYSCALL(timer_getoverrun)
+SYSCALL(timer_delete)
+COMPAT_SYS(clock_settime)
+COMPAT_SYS(clock_gettime)
+COMPAT_SYS(clock_getres)
+COMPAT_SYS(clock_nanosleep)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
+COMPAT_SYS(tgkill)
+COMPAT_SYS(utimes)
+COMPAT_SYS(statfs64)
+COMPAT_SYS(fstatfs64)
+SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+PPC_SYS(rtas)
+OLDSYS(debug_setcontext)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS(mbind)
+COMPAT_SYS(get_mempolicy)
+COMPAT_SYS(set_mempolicy)
+COMPAT_SYS(mq_open)
+SYSCALL(mq_unlink)
+COMPAT_SYS(mq_timedsend)
+COMPAT_SYS(mq_timedreceive)
+COMPAT_SYS(mq_notify)
+COMPAT_SYS(mq_getsetattr)
+COMPAT_SYS(kexec_load)
+COMPAT_SYS(add_key)
+COMPAT_SYS(request_key)
+COMPAT_SYS(keyctl)
+COMPAT_SYS(waitid)
+COMPAT_SYS(ioprio_set)
+COMPAT_SYS(ioprio_get)
+SYSCALL(inotify_init)
+SYSCALL(inotify_add_watch)
+SYSCALL(inotify_rm_watch)
similarity index 72%
rename from arch/ppc64/kernel/time.c
rename to arch/powerpc/kernel/time.c
index b56c6a3..b635c7d 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * 
  * Common time routines among all ppc machines.
  *
  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 #include <linux/interrupt.h>
 #include <linux/timex.h>
 #include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/profile.h>
 #include <linux/cpu.h>
 #include <linux/security.h>
+#include <linux/percpu.h>
+#include <linux/rtc.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/nvram.h>
 #include <asm/cache.h>
 #include <asm/machdep.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iSeries/ItLpQueue.h>
-#include <asm/iSeries/HvCallXm.h>
-#endif
 #include <asm/uaccess.h>
 #include <asm/time.h>
-#include <asm/ppcdebug.h>
 #include <asm/prom.h>
-#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+#ifdef CONFIG_PPC64
 #include <asm/systemcfg.h>
 #include <asm/firmware.h>
+#endif
+#ifdef CONFIG_PPC_ISERIES
+#include <asm/iSeries/ItLpQueue.h>
+#include <asm/iSeries/HvCallXm.h>
+#endif
 
 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 
@@ -81,27 +83,37 @@ unsigned long iSeries_recal_tb = 0;
 static unsigned long first_settimeofday = 1;
 #endif
 
+/* The decrementer counts down by 128 every 128ns on a 601. */
+#define DECREMENTER_COUNT_601  (1000000000 / HZ)
+
 #define XSEC_PER_SEC (1024*1024)
 
+#ifdef CONFIG_PPC64
+#define SCALE_XSEC(xsec, max)  (((xsec) * max) / XSEC_PER_SEC)
+#else
+/* compute ((xsec << 12) * max) >> 32 */
+#define SCALE_XSEC(xsec, max)  mulhwu((xsec) << 12, max)
+#endif
+
 unsigned long tb_ticks_per_jiffy;
 unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
-unsigned long tb_to_xs;
-unsigned      tb_to_us;
+u64 tb_to_xs;
+unsigned tb_to_us;
 unsigned long processor_freq;
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
 
-unsigned long tb_to_ns_scale;
-unsigned long tb_to_ns_shift;
+u64 tb_to_ns_scale;
+unsigned tb_to_ns_shift;
 
 struct gettimeofday_struct do_gtod;
 
 extern unsigned long wall_jiffies;
-extern int smp_tb_synchronized;
 
 extern struct timezone sys_tz;
+static long timezone_offset;
 
 void ppc_adjtimex(void);
 
@@ -110,6 +122,10 @@ static unsigned adjusting_time = 0;
 unsigned long ppc_proc_freq;
 unsigned long ppc_tb_freq;
 
+#ifdef CONFIG_PPC32    /* XXX for now */
+#define boot_cpuid     0
+#endif
+
 static __inline__ void timer_check_rtc(void)
 {
         /*
@@ -129,30 +145,30 @@ static __inline__ void timer_check_rtc(void)
          * seconds like on Intel to avoid problems with non UTC clocks.
          */
         if (ntp_synced() &&
-             xtime.tv_sec - last_rtc_update >= 659 &&
-             abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
-             jiffies - wall_jiffies == 1) {
-           struct rtc_time tm;
-           to_tm(xtime.tv_sec+1, &tm);
-           tm.tm_year -= 1900;
-           tm.tm_mon -= 1;
-            if (ppc_md.set_rtc_time(&tm) == 0)
-                last_rtc_update = xtime.tv_sec+1;
-            else
-                /* Try again one minute later */
-                last_rtc_update += 60;
+           xtime.tv_sec - last_rtc_update >= 659 &&
+           abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
+           jiffies - wall_jiffies == 1) {
+               struct rtc_time tm;
+               to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
+               tm.tm_year -= 1900;
+               tm.tm_mon -= 1;
+               if (ppc_md.set_rtc_time(&tm) == 0)
+                       last_rtc_update = xtime.tv_sec + 1;
+               else
+                       /* Try again one minute later */
+                       last_rtc_update += 60;
         }
 }
 
 /*
  * This version of gettimeofday has microsecond resolution.
  */
-static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
+static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
 {
-       unsigned long sec, usec, tb_ticks;
-       unsigned long xsec, tb_xsec;
-       struct gettimeofday_vars * temp_varp;
-       unsigned long temp_tb_to_xs, temp_stamp_xsec;
+       unsigned long sec, usec;
+       u64 tb_ticks, xsec;
+       struct gettimeofday_vars *temp_varp;
+       u64 temp_tb_to_xs, temp_stamp_xsec;
 
        /*
         * These calculations are faster (gets rid of divides)
@@ -164,11 +180,10 @@ static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
        tb_ticks = tb_val - temp_varp->tb_orig_stamp;
        temp_tb_to_xs = temp_varp->tb_to_xs;
        temp_stamp_xsec = temp_varp->stamp_xsec;
-       tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs );
-       xsec = temp_stamp_xsec + tb_xsec;
+       xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
        sec = xsec / XSEC_PER_SEC;
-       xsec -= sec * XSEC_PER_SEC;
-       usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC;
+       usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
+       usec = SCALE_XSEC(usec, 1000000);
 
        tv->tv_sec = sec;
        tv->tv_usec = usec;
@@ -185,6 +200,8 @@ EXPORT_SYMBOL(do_gettimeofday);
 
 static inline void timer_sync_xtime(unsigned long cur_tb)
 {
+#ifdef CONFIG_PPC64
+       /* why do we do this? */
        struct timeval my_tv;
 
        __do_gettimeofday(&my_tv, cur_tb);
@@ -193,47 +210,74 @@ static inline void timer_sync_xtime(unsigned long cur_tb)
                xtime.tv_sec = my_tv.tv_sec;
                xtime.tv_nsec = my_tv.tv_usec * 1000;
        }
+#endif
 }
 
 /*
- * When the timebase - tb_orig_stamp gets too big, we do a manipulation
- * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
- * difference tb - tb_orig_stamp small enough to always fit inside a
- * 32 bits number. This is a requirement of our fast 32 bits userland
- * implementation in the vdso. If we "miss" a call to this function
- * (interrupt latency, CPU locked in a spinlock, ...) and we end up
- * with a too big difference, then the vdso will fallback to calling
- * the syscall
+ * There are two copies of tb_to_xs and stamp_xsec so that no
+ * lock is needed to access and use these values in
+ * do_gettimeofday.  We alternate the copies and as long as a
+ * reasonable time elapses between changes, there will never
+ * be inconsistent values.  ntpd has a minimum of one minute
+ * between updates.
  */
-static __inline__ void timer_recalc_offset(unsigned long cur_tb)
+static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
+                              u64 new_tb_to_xs)
 {
-       struct gettimeofday_vars * temp_varp;
        unsigned temp_idx;
-       unsigned long offset, new_stamp_xsec, new_tb_orig_stamp;
-
-       if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)
-               return;
+       struct gettimeofday_vars *temp_varp;
 
        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];
 
-       new_tb_orig_stamp = cur_tb;
-       offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp;
-       new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);
-
-       temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
-       temp_varp->tb_orig_stamp = new_tb_orig_stamp;
+       temp_varp->tb_to_xs = new_tb_to_xs;
+       temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;
 
+#ifdef CONFIG_PPC64
+       /*
+        * tb_update_count is used to allow the userspace gettimeofday code
+        * to assure itself that it sees a consistent view of the tb_to_xs and
+        * stamp_xsec variables.  It reads the tb_update_count, then reads
+        * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
+        * the two values of tb_update_count match and are even then the
+        * tb_to_xs and stamp_xsec values are consistent.  If not, then it
+        * loops back and reads them again until this criteria is met.
+        */
        ++(systemcfg->tb_update_count);
        smp_wmb();
-       systemcfg->tb_orig_stamp = new_tb_orig_stamp;
+       systemcfg->tb_orig_stamp = new_tb_stamp;
        systemcfg->stamp_xsec = new_stamp_xsec;
+       systemcfg->tb_to_xs = new_tb_to_xs;
        smp_wmb();
        ++(systemcfg->tb_update_count);
+#endif
+}
+
+/*
+ * When the timebase - tb_orig_stamp gets too big, we do a manipulation
+ * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
+ * difference tb - tb_orig_stamp small enough to always fit inside a
+ * 32 bits number. This is a requirement of our fast 32 bits userland
+ * implementation in the vdso. If we "miss" a call to this function
+ * (interrupt latency, CPU locked in a spinlock, ...) and we end up
+ * with a too big difference, then the vdso will fallback to calling
+ * the syscall
+ */
+static __inline__ void timer_recalc_offset(u64 cur_tb)
+{
+       unsigned long offset;
+       u64 new_stamp_xsec;
+
+       offset = cur_tb - do_gtod.varp->tb_orig_stamp;
+       if ((offset & 0x80000000u) == 0)
+               return;
+       new_stamp_xsec = do_gtod.varp->stamp_xsec
+               + mulhdu(offset, do_gtod.varp->tb_to_xs);
+       update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
 }
 
 #ifdef CONFIG_SMP
@@ -313,26 +357,46 @@ static void iSeries_tb_recal(void)
  * call will not be needed)
  */
 
-unsigned long tb_last_stamp __cacheline_aligned_in_smp;
+u64 tb_last_stamp __cacheline_aligned_in_smp;
+
+/*
+ * Note that on ppc32 this only stores the bottom 32 bits of
+ * the timebase value, but that's enough to tell when a jiffy
+ * has passed.
+ */
+DEFINE_PER_CPU(unsigned long, last_jiffy);
 
 /*
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
  */
-int timer_interrupt(struct pt_regs * regs)
+void timer_interrupt(struct pt_regs * regs)
 {
        int next_dec;
-       unsigned long cur_tb;
-       struct paca_struct *lpaca = get_paca();
-       unsigned long cpu = smp_processor_id();
+       int cpu = smp_processor_id();
+       unsigned long ticks;
+
+#ifdef CONFIG_PPC32
+       if (atomic_read(&ppc_n_lost_interrupts) != 0)
+               do_IRQ(regs);
+#endif
 
        irq_enter();
 
        profile_tick(CPU_PROFILING, regs);
 
-       lpaca->lppaca.int_dword.fields.decr_int = 0;
+#ifdef CONFIG_PPC_ISERIES
+       get_paca()->lppaca.int_dword.fields.decr_int = 0;
+#endif
+
+       while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
+              >= tb_ticks_per_jiffy) {
+               /* Update last_jiffy */
+               per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
+               /* Handle RTCL overflow on 601 */
+               if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
+                       per_cpu(last_jiffy, cpu) -= 1000000000;
 
-       while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
                /*
                 * We cannot disable the decrementer, so in the period
                 * between this cpu's being marked offline in cpu_online_map
@@ -342,27 +406,26 @@ int timer_interrupt(struct pt_regs * regs)
                 */
                if (!cpu_is_offline(cpu))
                        update_process_times(user_mode(regs));
+
                /*
                 * No need to check whether cpu is offline here; boot_cpuid
                 * should have been fixed up by now.
                 */
-               if (cpu == boot_cpuid) {
-                       write_seqlock(&xtime_lock);
-                       tb_last_stamp = lpaca->next_jiffy_update_tb;
-                       timer_recalc_offset(lpaca->next_jiffy_update_tb);
-                       do_timer(regs);
-                       timer_sync_xtime(lpaca->next_jiffy_update_tb);
-                       timer_check_rtc();
-                       write_sequnlock(&xtime_lock);
-                       if ( adjusting_time && (time_adjust == 0) )
-                               ppc_adjtimex();
-               }
-               lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
+               if (cpu != boot_cpuid)
+                       continue;
+
+               write_seqlock(&xtime_lock);
+               tb_last_stamp += tb_ticks_per_jiffy;
+               timer_recalc_offset(tb_last_stamp);
+               do_timer(regs);
+               timer_sync_xtime(tb_last_stamp);
+               timer_check_rtc();
+               write_sequnlock(&xtime_lock);
+               if (adjusting_time && (time_adjust == 0))
+                       ppc_adjtimex();
        }
        
-       next_dec = lpaca->next_jiffy_update_tb - cur_tb;
-       if (next_dec > lpaca->default_decr)
-               next_dec = lpaca->default_decr;
+       next_dec = tb_ticks_per_jiffy - ticks;
        set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
@@ -370,16 +433,46 @@ int timer_interrupt(struct pt_regs * regs)
                process_hvlpevents(regs);
 #endif
 
+#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
+#endif
 
        irq_exit();
+}
+
+void wakeup_decrementer(void)
+{
+       int i;
+
+       set_dec(tb_ticks_per_jiffy);
+       /*
+        * We don't expect this to be called on a machine with a 601,
+        * so using get_tbl is fine.
+        */
+       tb_last_stamp = get_tb();
+       for_each_cpu(i)
+               per_cpu(last_jiffy, i) = tb_last_stamp;
+}
 
-       return 1;
+#ifdef CONFIG_SMP
+void __init smp_space_timers(unsigned int max_cpus)
+{
+       int i;
+       unsigned long offset = tb_ticks_per_jiffy / max_cpus;
+       unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
+
+       for_each_cpu(i) {
+               if (i != boot_cpuid) {
+                       previous_tb += offset;
+                       per_cpu(last_jiffy, i) = previous_tb;
+               }
+       }
 }
+#endif
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -398,23 +491,24 @@ int do_settimeofday(struct timespec *tv)
        time_t wtm_sec, new_sec = tv->tv_sec;
        long wtm_nsec, new_nsec = tv->tv_nsec;
        unsigned long flags;
-       unsigned long delta_xsec;
        long int tb_delta;
-       unsigned long new_xsec;
+       u64 new_xsec;
 
        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;
 
        write_seqlock_irqsave(&xtime_lock, flags);
-       /* Updating the RTC is not the job of this code. If the time is
-        * stepped under NTP, the RTC will be update after STA_UNSYNC
-        * is cleared. Tool like clock/hwclock either copy the RTC
+
+       /*
+        * Updating the RTC is not the job of this code. If the time is
+        * stepped under NTP, the RTC will be updated after STA_UNSYNC
+        * is cleared.  Tools like clock/hwclock either copy the RTC
         * to the system time, in which case there is no point in writing
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation.
         */
 #ifdef CONFIG_PPC_ISERIES
-       if ( first_settimeofday ) {
+       if (first_settimeofday) {
                iSeries_tb_recal();
                first_settimeofday = 0;
        }
@@ -422,7 +516,7 @@ int do_settimeofday(struct timespec *tv)
        tb_delta = tb_ticks_since(tb_last_stamp);
        tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
 
-       new_nsec -= tb_delta / tb_ticks_per_usec / 1000;
+       new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);
 
        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -437,28 +531,15 @@ int do_settimeofday(struct timespec *tv)
 
        ntp_clear();
 
-       delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
-                            do_gtod.varp->tb_to_xs );
-
-       new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC;
-       new_xsec += new_sec * XSEC_PER_SEC;
-       if ( new_xsec > delta_xsec ) {
-               do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
-               systemcfg->stamp_xsec = new_xsec - delta_xsec;
-       }
-       else {
-               /* This is only for the case where the user is setting the time
-                * way back to a time such that the boot time would have been
-                * before 1970 ... eg. we booted ten days ago, and we are setting
-                * the time to Jan 5, 1970 */
-               do_gtod.varp->stamp_xsec = new_xsec;
-               do_gtod.varp->tb_orig_stamp = tb_last_stamp;
-               systemcfg->stamp_xsec = new_xsec;
-               systemcfg->tb_orig_stamp = tb_last_stamp;
-       }
+       new_xsec = (u64)new_nsec * XSEC_PER_SEC;
+       do_div(new_xsec, NSEC_PER_SEC);
+       new_xsec += (u64)new_sec * XSEC_PER_SEC;
+       update_gtod(tb_last_stamp, new_xsec, do_gtod.varp->tb_to_xs);
 
+#ifdef CONFIG_PPC64
        systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
        systemcfg->tz_dsttime = sys_tz.tz_dsttime;
+#endif
 
        write_sequnlock_irqrestore(&xtime_lock, flags);
        clock_was_set();
@@ -467,11 +548,9 @@ int do_settimeofday(struct timespec *tv)
 
 EXPORT_SYMBOL(do_settimeofday);
 
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
 void __init generic_calibrate_decr(void)
 {
        struct device_node *cpu;
-       struct div_result divres;
        unsigned int *fp;
        int node_found;
 
@@ -510,33 +589,51 @@ void __init generic_calibrate_decr(void)
                                "(not found)\n");
 
        of_node_put(cpu);
+}
 
-       printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
-              ppc_tb_freq/1000000, ppc_tb_freq%1000000);
-       printk(KERN_INFO "time_init: processor frequency   = %lu.%.6lu MHz\n",
-              ppc_proc_freq/1000000, ppc_proc_freq%1000000);
-
-       tb_ticks_per_jiffy = ppc_tb_freq / HZ;
-       tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
-       tb_ticks_per_usec = ppc_tb_freq / 1000000;
-       tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
-       div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
-       tb_to_xs = divres.result_low;
+unsigned long get_boot_time(void)
+{
+       struct rtc_time tm;
 
-       setup_default_decr();
+       if (ppc_md.get_boot_time)
+               return ppc_md.get_boot_time();
+       if (!ppc_md.get_rtc_time)
+               return 0;
+       ppc_md.get_rtc_time(&tm);
+       return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
+                     tm.tm_hour, tm.tm_min, tm.tm_sec);
 }
-#endif
 
+/* This function is only called on the boot processor */
 void __init time_init(void)
 {
-       /* This function is only called on the boot processor */
        unsigned long flags;
-       struct rtc_time tm;
+       unsigned long tm = 0;
        struct div_result res;
-       unsigned long scale, shift;
+       u64 scale;
+       unsigned shift;
+
+        if (ppc_md.time_init != NULL)
+                timezone_offset = ppc_md.time_init();
 
        ppc_md.calibrate_decr();
 
+       printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+              ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
+       printk(KERN_INFO "time_init: processor frequency   = %lu.%.6lu MHz\n",
+              ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
+
+       tb_ticks_per_jiffy = ppc_tb_freq / HZ;
+       tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+       tb_ticks_per_usec = ppc_tb_freq / 1000000;
+       tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
+       div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
+       tb_to_xs = res.result_low;
+
+#ifdef CONFIG_PPC64
+       get_paca()->default_decr = tb_ticks_per_jiffy;
+#endif
+
        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
@@ -559,29 +656,37 @@ void __init time_init(void)
 #ifdef CONFIG_PPC_ISERIES
        if (!piranha_simulator)
 #endif
-               ppc_md.get_boot_time(&tm);
+               tm = get_boot_time();
 
        write_seqlock_irqsave(&xtime_lock, flags);
-       xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
-                             tm.tm_hour, tm.tm_min, tm.tm_sec);
+       xtime.tv_sec = tm;
+       xtime.tv_nsec = 0;
        tb_last_stamp = get_tb();
        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->tb_orig_stamp = tb_last_stamp;
-       get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy;
-       do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+       __get_cpu_var(last_jiffy) = tb_last_stamp;
+       do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;
+#ifdef CONFIG_PPC64
        systemcfg->tb_orig_stamp = tb_last_stamp;
        systemcfg->tb_update_count = 0;
        systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
        systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
        systemcfg->tb_to_xs = tb_to_xs;
+#endif
 
        time_freq = 0;
 
-       xtime.tv_nsec = 0;
+       /* If platform provided a timezone (pmac), we correct the time */
+        if (timezone_offset) {
+               sys_tz.tz_minuteswest = -timezone_offset / 60;
+               sys_tz.tz_dsttime = 0;
+               xtime.tv_sec -= timezone_offset;
+        }
+
        last_rtc_update = xtime.tv_sec;
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);
@@ -604,25 +709,28 @@ void __init time_init(void)
 
 void ppc_adjtimex(void)
 {
-       unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec;
+#ifdef CONFIG_PPC64
+       unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
+               new_tb_to_xs, new_xsec, new_stamp_xsec;
        unsigned long tb_ticks_per_sec_delta;
        long delta_freq, ltemp;
        struct div_result divres; 
        unsigned long flags;
-       struct gettimeofday_vars * temp_varp;
-       unsigned temp_idx;
        long singleshot_ppm = 0;
 
-       /* Compute parts per million frequency adjustment to accomplish the time adjustment
-          implied by time_offset to be applied over the elapsed time indicated by time_constant.
-          Use SHIFT_USEC to get it into the same units as time_freq. */
+       /*
+        * Compute parts per million frequency adjustment to
+        * accomplish the time adjustment implied by time_offset to be
+        * applied over the elapsed time indicated by time_constant.
+        * Use SHIFT_USEC to get it into the same units as
+        * time_freq.
+        */
        if ( time_offset < 0 ) {
                ltemp = -time_offset;
                ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
                ltemp >>= SHIFT_KG + time_constant;
                ltemp = -ltemp;
-       }
-       else {
+       } else {
                ltemp = time_offset;
                ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
                ltemp >>= SHIFT_KG + time_constant;
@@ -639,7 +747,10 @@ void ppc_adjtimex(void)
        
                adjusting_time = 1;
                
-               /* Compute parts per million frequency adjustment to match time_adjust */
+               /*
+                * Compute parts per million frequency adjustment
+                * to match time_adjust
+                */
                singleshot_ppm = tickadj * HZ;  
                /*
                 * The adjustment should be tickadj*HZ to match the code in
@@ -647,7 +758,7 @@ void ppc_adjtimex(void)
                 * large. 3/4 of tickadj*HZ seems about right
                 */
                singleshot_ppm -= singleshot_ppm / 4;
-               /* Use SHIFT_USEC to get it into the same units as time_freq */ 
+               /* Use SHIFT_USEC to get it into the same units as time_freq */
                singleshot_ppm <<= SHIFT_USEC;
                if ( time_adjust < 0 )
                        singleshot_ppm = -singleshot_ppm;
@@ -663,7 +774,10 @@ void ppc_adjtimex(void)
        /* Add up all of the frequency adjustments */
        delta_freq = time_freq + ltemp + singleshot_ppm;
        
-       /* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */
+       /*
+        * Compute a new value for tb_ticks_per_sec based on
+        * the frequency adjustment
+        */
        den = 1000000 * (1 << (SHIFT_USEC - 8));
        if ( delta_freq < 0 ) {
                tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
@@ -678,61 +792,37 @@ void ppc_adjtimex(void)
        printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
        printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld  new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
 #endif
-                               
-       /* Compute a new value of tb_to_xs (used to convert tb to microseconds and a new value of 
-          stamp_xsec which is the time (in 1/2^20 second units) corresponding to tb_orig_stamp.  This 
-          new value of stamp_xsec compensates for the change in frequency (implied by the new tb_to_xs)
-          which guarantees that the current time remains the same */ 
+
+       /*
+        * Compute a new value of tb_to_xs (used to convert tb to
+        * microseconds) and a new value of stamp_xsec which is the
+        * time (in 1/2^20 second units) corresponding to
+        * tb_orig_stamp.  This new value of stamp_xsec compensates
+        * for the change in frequency (implied by the new tb_to_xs)
+        * which guarantees that the current time remains the same.
+        */
        write_seqlock_irqsave( &xtime_lock, flags );
        tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
-       div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres );
+       div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
        new_tb_to_xs = divres.result_low;
-       new_xsec = mulhdu( tb_ticks, new_tb_to_xs );
+       new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
 
-       old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs );
+       old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
        new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
 
-       /* There are two copies of tb_to_xs and stamp_xsec so that no lock is needed to access and use these
-          values in do_gettimeofday.  We alternate the copies and as long as a reasonable time elapses between
-          changes, there will never be inconsistent values.  ntpd has a minimum of one minute between updates */
-
-       temp_idx = (do_gtod.var_idx == 0);
-       temp_varp = &do_gtod.vars[temp_idx];
-
-       temp_varp->tb_to_xs = new_tb_to_xs;
-       temp_varp->stamp_xsec = new_stamp_xsec;
-       temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
-       smp_mb();
-       do_gtod.varp = temp_varp;
-       do_gtod.var_idx = temp_idx;
-
-       /*
-        * tb_update_count is used to allow the problem state gettimeofday code
-        * to assure itself that it sees a consistent view of the tb_to_xs and
-        * stamp_xsec variables.  It reads the tb_update_count, then reads
-        * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
-        * the two values of tb_update_count match and are even then the
-        * tb_to_xs and stamp_xsec values are consistent.  If not, then it
-        * loops back and reads them again until this criteria is met.
-        */
-       ++(systemcfg->tb_update_count);
-       smp_wmb();
-       systemcfg->tb_to_xs = new_tb_to_xs;
-       systemcfg->stamp_xsec = new_stamp_xsec;
-       smp_wmb();
-       ++(systemcfg->tb_update_count);
+       update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
 
        write_sequnlock_irqrestore( &xtime_lock, flags );
-
+#endif /* CONFIG_PPC64 */
 }
 
 
-#define TICK_SIZE tick
 #define FEBRUARY       2
 #define        STARTOFTIME     1970
 #define SECDAY         86400L
 #define SECYR          (SECDAY * 365)
-#define        leapyear(year)          ((year) % 4 == 0)
+#define        leapyear(year)          ((year) % 4 == 0 && \
+                                ((year) % 100 != 0 || (year) % 400 == 0))
 #define        days_in_year(a)         (leapyear(a) ? 366 : 365)
 #define        days_in_month(a)        (month_days[(a) - 1])
 
@@ -750,37 +840,25 @@ void GregorianDay(struct rtc_time * tm)
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
 
-       lastYear=tm->tm_year-1;
+       lastYear = tm->tm_year - 1;
 
        /*
         * Number of leap corrections to apply up to end of last year
         */
-       leapsToDate = lastYear/4 - lastYear/100 + lastYear/400;
+       leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
 
        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
-        * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be
+        * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
-       if((tm->tm_year%4==0) &&
-          ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
-          (tm->tm_mon>2))
-       {
-               /*
-                * We are past Feb. 29 in a leap year
-                */
-               day=1;
-       }
-       else
-       {
-               day=0;
-       }
+       day = tm->tm_mon > 2 && leapyear(tm->tm_year);
 
        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;
 
-       tm->tm_wday=day%7;
+       tm->tm_wday = day % 7;
 }
 
 void to_tm(int tim, struct rtc_time * tm)
@@ -826,14 +904,16 @@ void to_tm(int tim, struct rtc_time * tm)
  * oscillators and the precision with which the timebase frequency
  * is measured but does not harm.
  */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
+unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
+{
         unsigned mlt=0, tmp, err;
         /* No concern for performance, it's done once: use a stupid
          * but safe and compact method to find the multiplier.
          */
   
         for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-                if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp;
+                if (mulhwu(inscale, mlt|tmp) < outscale)
+                       mlt |= tmp;
         }
   
         /* We might still be off by 1 for the best approximation.
@@ -843,39 +923,41 @@ unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
          * some might have been forgotten in the test however.
          */
   
-        err = inscale*(mlt+1);
-        if (err <= inscale/2) mlt++;
+        err = inscale * (mlt+1);
+        if (err <= inscale/2)
+               mlt++;
         return mlt;
-  }
+}
 
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
  */
-
-void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
-                  unsigned divisor, struct div_result *dr )
+void div128_by_32(u64 dividend_high, u64 dividend_low,
+                 unsigned divisor, struct div_result *dr)
 {
-       unsigned long a,b,c,d, w,x,y,z, ra,rb,rc;
+       unsigned long a, b, c, d;
+       unsigned long w, x, y, z;
+       u64 ra, rb, rc;
 
        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;
 
-       w = a/divisor;
-       ra = (a - (w * divisor)) << 32;
+       w = a / divisor;
+       ra = ((u64)(a - (w * divisor)) << 32) + b;
 
-       x = (ra + b)/divisor;
-       rb = ((ra + b) - (x * divisor)) << 32;
+       rb = ((u64) do_div(ra, divisor) << 32) + c;
+       x = ra;
 
-       y = (rb + c)/divisor;
-       rc = ((rb + c) - (y * divisor)) << 32;
+       rc = ((u64) do_div(rb, divisor) << 32) + d;
+       y = rb;
 
-       z = (rc + d)/divisor;
+       do_div(rc, divisor);
+       z = rc;
 
-       dr->result_high = (w << 32) + x;
-       dr->result_low  = (y << 32) + z;
+       dr->result_high = ((u64)w << 32) + x;
+       dr->result_low  = ((u64)y << 32) + z;
 
 }
-
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644 (file)
index 0000000..f875803
--- /dev/null
@@ -0,0 +1,1101 @@
+/*
+ *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  Modified by Cort Dougan (cort@cs.nmt.edu)
+ *  and Paul Mackerras (paulus@samba.org)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of hardware exceptions
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/prctl.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+
+#include <asm/kdebug.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/xmon.h>
+#include <asm/pmc.h>
+#ifdef CONFIG_PPC32
+#include <asm/reg.h>
+#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+#ifdef CONFIG_PPC64
+#include <asm/firmware.h>
+#include <asm/processor.h>
+#include <asm/systemcfg.h>
+#endif
+
+#ifdef CONFIG_PPC64    /* XXX */
+#define _IO_BASE       pci_io_base
+#endif
+
+#ifdef CONFIG_DEBUGGER
+int (*__debugger)(struct pt_regs *regs);
+int (*__debugger_ipi)(struct pt_regs *regs);
+int (*__debugger_bpt)(struct pt_regs *regs);
+int (*__debugger_sstep)(struct pt_regs *regs);
+int (*__debugger_iabr_match)(struct pt_regs *regs);
+int (*__debugger_dabr_match)(struct pt_regs *regs);
+int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+EXPORT_SYMBOL(__debugger);
+EXPORT_SYMBOL(__debugger_ipi);
+EXPORT_SYMBOL(__debugger_bpt);
+EXPORT_SYMBOL(__debugger_sstep);
+EXPORT_SYMBOL(__debugger_iabr_match);
+EXPORT_SYMBOL(__debugger_dabr_match);
+EXPORT_SYMBOL(__debugger_fault_handler);
+#endif
+
+struct notifier_block *powerpc_die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+       int err = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&die_notifier_lock, flags);
+       err = notifier_chain_register(&powerpc_die_chain, nb);
+       spin_unlock_irqrestore(&die_notifier_lock, flags);
+       return err;
+}
+
+/*
+ * Trap & Exception support
+ */
+
+static DEFINE_SPINLOCK(die_lock);
+
+int die(const char *str, struct pt_regs *regs, long err)
+{
+       static int die_counter;
+       int nl = 0;
+
+       if (debugger(regs))
+               return 1;
+
+       console_verbose();
+       spin_lock_irq(&die_lock);
+       bust_spinlocks(1);
+#ifdef CONFIG_PMAC_BACKLIGHT
+       if (_machine == _MACH_Pmac) {
+               set_backlight_enable(1);
+               set_backlight_level(BACKLIGHT_MAX);
+       }
+#endif
+       printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+       printk("PREEMPT ");
+       nl = 1;
+#endif
+#ifdef CONFIG_SMP
+       printk("SMP NR_CPUS=%d ", NR_CPUS);
+       nl = 1;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       printk("DEBUG_PAGEALLOC ");
+       nl = 1;
+#endif
+#ifdef CONFIG_NUMA
+       printk("NUMA ");
+       nl = 1;
+#endif
+#ifdef CONFIG_PPC64
+       switch (systemcfg->platform) {
+       case PLATFORM_PSERIES:
+               printk("PSERIES ");
+               nl = 1;
+               break;
+       case PLATFORM_PSERIES_LPAR:
+               printk("PSERIES LPAR ");
+               nl = 1;
+               break;
+       case PLATFORM_ISERIES_LPAR:
+               printk("ISERIES LPAR ");
+               nl = 1;
+               break;
+       case PLATFORM_POWERMAC:
+               printk("POWERMAC ");
+               nl = 1;
+               break;
+       case PLATFORM_BPA:
+               printk("BPA ");
+               nl = 1;
+               break;
+       }
+#endif
+       if (nl)
+               printk("\n");
+       print_modules();
+       show_regs(regs);
+       bust_spinlocks(0);
+       spin_unlock_irq(&die_lock);
+
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops) {
+#ifdef CONFIG_PPC64
+               printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+               ssleep(5);
+#endif
+               panic("Fatal exception");
+       }
+       do_exit(err);
+
+       return 0;
+}
+
+void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
+{
+       siginfo_t info;
+
+       if (!user_mode(regs)) {
+               if (die("Exception in kernel mode", regs, signr))
+                       return;
+       }
+
+       memset(&info, 0, sizeof(info));
+       info.si_signo = signr;
+       info.si_code = code;
+       info.si_addr = (void __user *) addr;
+       force_sig_info(signr, &info, current);
+
+       /*
+        * Init gets no signals that it doesn't have a handler for.
+        * That's all very well, but if it has caused a synchronous
+        * exception and we ignore the resulting signal, it will just
+        * generate the same exception over and over again and we get
+        * nowhere.  Better to kill it and let the kernel panic.
+        */
+       if (current->pid == 1) {
+               __sighandler_t handler;
+
+               spin_lock_irq(&current->sighand->siglock);
+               handler = current->sighand->action[signr-1].sa.sa_handler;
+               spin_unlock_irq(&current->sighand->siglock);
+               if (handler == SIG_DFL) {
+                       /* init has generated a synchronous exception
+                          and it doesn't have a handler for the signal */
+                       printk(KERN_CRIT "init has generated signal %d "
+                              "but has no handler for it\n", signr);
+                       do_exit(signr);
+               }
+       }
+}
+
+#ifdef CONFIG_PPC64
+void system_reset_exception(struct pt_regs *regs)
+{
+       /* See if any machine dependent calls */
+       if (ppc_md.system_reset_exception)
+               ppc_md.system_reset_exception(regs);
+
+       die("System Reset", regs, SIGABRT);
+
+       /* Must die if the interrupt is not recoverable */
+       if (!(regs->msr & MSR_RI))
+               panic("Unrecoverable System Reset");
+
+       /* What should we do here? We could issue a shutdown or hard reset. */
+}
+#endif
+
+/*
+ * I/O accesses can cause machine checks on powermacs.
+ * Check if the NIP corresponds to the address of a sync
+ * instruction for which there is an entry in the exception
+ * table.
+ * Note that the 601 only takes a machine check on TEA
+ * (transfer error ack) signal assertion, and does not
+ * set any of the top 16 bits of SRR1.
+ *  -- paulus.
+ */
+static inline int check_io_access(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_PMAC
+       unsigned long msr = regs->msr;
+       const struct exception_table_entry *entry;
+       unsigned int *nip = (unsigned int *)regs->nip;
+
+       if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
+           && (entry = search_exception_tables(regs->nip)) != NULL) {
+               /*
+                * Check that it's a sync instruction, or somewhere
+                * in the twi; isync; nop sequence that inb/inw/inl uses.
+                * As the address is in the exception table
+                * we should be able to read the instr there.
+                * For the debug message, we look at the preceding
+                * load or store.
+                */
+               if (*nip == 0x60000000)         /* nop */
+                       nip -= 2;
+               else if (*nip == 0x4c00012c)    /* isync */
+                       --nip;
+               if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
+                       /* sync or twi */
+                       unsigned int rb;
+
+                       --nip;
+                       rb = (*nip >> 11) & 0x1f;
+                       printk(KERN_DEBUG "%s bad port %lx at %p\n",
+                              (*nip & 0x100)? "OUT to": "IN from",
+                              regs->gpr[rb] - _IO_BASE, nip);
+                       regs->msr |= MSR_RI;
+                       regs->nip = entry->fixup;
+                       return 1;
+               }
+       }
+#endif /* CONFIG_PPC_PMAC */
+       return 0;
+}
+
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+/* On 4xx, the reason for the machine check or program exception
+   is in the ESR. */
+#define get_reason(regs)       ((regs)->dsisr)
+#ifndef CONFIG_FSL_BOOKE
+#define get_mc_reason(regs)    ((regs)->dsisr)
+#else
+#define get_mc_reason(regs)    (mfspr(SPRN_MCSR))
+#endif
+#define REASON_FP              ESR_FP
+#define REASON_ILLEGAL         (ESR_PIL | ESR_PUO)
+#define REASON_PRIVILEGED      ESR_PPR
+#define REASON_TRAP            ESR_PTR
+
+/* single-step stuff */
+#define single_stepping(regs)  (current->thread.dbcr0 & DBCR0_IC)
+#define clear_single_step(regs)        (current->thread.dbcr0 &= ~DBCR0_IC)
+
+#else
+/* On non-4xx, the reason for the machine check or program
+   exception is in the MSR. */
+#define get_reason(regs)       ((regs)->msr)
+#define get_mc_reason(regs)    ((regs)->msr)
+#define REASON_FP              0x100000
+#define REASON_ILLEGAL         0x80000
+#define REASON_PRIVILEGED      0x40000
+#define REASON_TRAP            0x20000
+
+#define single_stepping(regs)  ((regs)->msr & MSR_SE)
+#define clear_single_step(regs)        ((regs)->msr &= ~MSR_SE)
+#endif
+
+/*
+ * This is "fall-back" implementation for configurations
+ * which don't provide platform-specific machine check info
+ */
+void __attribute__ ((weak))
+platform_machine_check(struct pt_regs *regs)
+{
+}
+
+void machine_check_exception(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC64
+       int recover = 0;
+
+       /* See if any machine dependent calls */
+       if (ppc_md.machine_check_exception)
+               recover = ppc_md.machine_check_exception(regs);
+
+       if (recover)
+               return;
+#else
+       unsigned long reason = get_mc_reason(regs);
+
+       if (user_mode(regs)) {
+               regs->msr |= MSR_RI;
+               _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
+               return;
+       }
+
+#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
+       /* the qspan pci read routines can cause machine checks -- Cort */
+       bad_page_fault(regs, regs->dar, SIGBUS);
+       return;
+#endif
+
+       if (debugger_fault_handler(regs)) {
+               regs->msr |= MSR_RI;
+               return;
+       }
+
+       if (check_io_access(regs))
+               return;
+
+#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
+       if (reason & ESR_IMCP) {
+               printk("Instruction");
+               mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+       } else
+               printk("Data");
+       printk(" machine check in kernel mode.\n");
+#elif defined(CONFIG_440A)
+       printk("Machine check in kernel mode.\n");
+       if (reason & ESR_IMCP){
+               printk("Instruction Synchronous Machine Check exception\n");
+               mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+       }
+       else {
+               u32 mcsr = mfspr(SPRN_MCSR);
+               if (mcsr & MCSR_IB)
+                       printk("Instruction Read PLB Error\n");
+               if (mcsr & MCSR_DRB)
+                       printk("Data Read PLB Error\n");
+               if (mcsr & MCSR_DWB)
+                       printk("Data Write PLB Error\n");
+               if (mcsr & MCSR_TLBP)
+                       printk("TLB Parity Error\n");
+               if (mcsr & MCSR_ICP){
+                       flush_instruction_cache();
+                       printk("I-Cache Parity Error\n");
+               }
+               if (mcsr & MCSR_DCSP)
+                       printk("D-Cache Search Parity Error\n");
+               if (mcsr & MCSR_DCFP)
+                       printk("D-Cache Flush Parity Error\n");
+               if (mcsr & MCSR_IMPE)
+                       printk("Machine Check exception is imprecise\n");
+
+               /* Clear MCSR */
+               mtspr(SPRN_MCSR, mcsr);
+       }
+#elif defined (CONFIG_E500)
+       printk("Machine check in kernel mode.\n");
+       printk("Caused by (from MCSR=%lx): ", reason);
+
+       if (reason & MCSR_MCP)
+               printk("Machine Check Signal\n");
+       if (reason & MCSR_ICPERR)
+               printk("Instruction Cache Parity Error\n");
+       if (reason & MCSR_DCP_PERR)
+               printk("Data Cache Push Parity Error\n");
+       if (reason & MCSR_DCPERR)
+               printk("Data Cache Parity Error\n");
+       if (reason & MCSR_GL_CI)
+               printk("Guarded Load or Cache-Inhibited stwcx.\n");
+       if (reason & MCSR_BUS_IAERR)
+               printk("Bus - Instruction Address Error\n");
+       if (reason & MCSR_BUS_RAERR)
+               printk("Bus - Read Address Error\n");
+       if (reason & MCSR_BUS_WAERR)
+               printk("Bus - Write Address Error\n");
+       if (reason & MCSR_BUS_IBERR)
+               printk("Bus - Instruction Data Error\n");
+       if (reason & MCSR_BUS_RBERR)
+               printk("Bus - Read Data Bus Error\n");
+       if (reason & MCSR_BUS_WBERR)
+               printk("Bus - Write Data Bus Error\n");
+       if (reason & MCSR_BUS_IPERR)
+               printk("Bus - Instruction Parity Error\n");
+       if (reason & MCSR_BUS_RPERR)
+               printk("Bus - Read Parity Error\n");
+#elif defined (CONFIG_E200)
+       printk("Machine check in kernel mode.\n");
+       printk("Caused by (from MCSR=%lx): ", reason);
+
+       if (reason & MCSR_MCP)
+               printk("Machine Check Signal\n");
+       if (reason & MCSR_CP_PERR)
+               printk("Cache Push Parity Error\n");
+       if (reason & MCSR_CPERR)
+               printk("Cache Parity Error\n");
+       if (reason & MCSR_EXCP_ERR)
+               printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
+       if (reason & MCSR_BUS_IRERR)
+               printk("Bus - Read Bus Error on instruction fetch\n");
+       if (reason & MCSR_BUS_DRERR)
+               printk("Bus - Read Bus Error on data load\n");
+       if (reason & MCSR_BUS_WRERR)
+               printk("Bus - Write Bus Error on buffered store or cache line push\n");
+#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
+       printk("Machine check in kernel mode.\n");
+       printk("Caused by (from SRR1=%lx): ", reason);
+       switch (reason & 0x601F0000) {
+       case 0x80000:
+               printk("Machine check signal\n");
+               break;
+       case 0:         /* for 601 */
+       case 0x40000:
+       case 0x140000:  /* 7450 MSS error and TEA */
+               printk("Transfer error ack signal\n");
+               break;
+       case 0x20000:
+               printk("Data parity error signal\n");
+               break;
+       case 0x10000:
+               printk("Address parity error signal\n");
+               break;
+       case 0x20000000:
+               printk("L1 Data Cache error\n");
+               break;
+       case 0x40000000:
+               printk("L1 Instruction Cache error\n");
+               break;
+       case 0x00100000:
+               printk("L2 data cache parity error\n");
+               break;
+       default:
+               printk("Unknown values in msr\n");
+       }
+#endif /* CONFIG_4xx */
+
+       /*
+        * Optional platform-provided routine to print out
+        * additional info, e.g. bus error registers.
+        */
+       platform_machine_check(regs);
+#endif /* CONFIG_PPC64 */
+
+       if (debugger_fault_handler(regs))
+               return;
+       die("Machine check", regs, SIGBUS);
+
+       /* Must die if the interrupt is not recoverable */
+       if (!(regs->msr & MSR_RI))
+               panic("Unrecoverable Machine check");
+}
+
+void SMIException(struct pt_regs *regs)
+{
+       die("System Management Interrupt", regs, SIGABRT);
+}
+
+void unknown_exception(struct pt_regs *regs)
+{
+       printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+              regs->nip, regs->msr, regs->trap);
+
+       _exception(SIGTRAP, regs, 0, 0);
+}
+
+void instruction_breakpoint_exception(struct pt_regs *regs)
+{
+       if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
+                                       5, SIGTRAP) == NOTIFY_STOP)
+               return;
+       if (debugger_iabr_match(regs))
+               return;
+       _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+}
+
+void RunModeException(struct pt_regs *regs)
+{
+       _exception(SIGTRAP, regs, 0, 0);
+}
+
+void __kprobes single_step_exception(struct pt_regs *regs)
+{
+       regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
+
+       if (notify_die(DIE_SSTEP, "single_step", regs, 5,
+                                       5, SIGTRAP) == NOTIFY_STOP)
+               return;
+       if (debugger_sstep(regs))
+               return;
+
+       _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+}
+
+/*
+ * After we have successfully emulated an instruction, we have to
+ * check if the instruction was being single-stepped, and if so,
+ * pretend we got a single-step exception.  This was pointed out
+ * by Kumar Gala.  -- paulus
+ */
+static void emulate_single_step(struct pt_regs *regs)
+{
+       if (single_stepping(regs)) {
+               clear_single_step(regs);
+               _exception(SIGTRAP, regs, TRAP_TRACE, 0);
+       }
+}
+
+static void parse_fpe(struct pt_regs *regs)
+{
+       int code = 0;
+       unsigned long fpscr;
+
+       flush_fp_to_thread(current);
+
+       fpscr = current->thread.fpscr;
+
+       /* Invalid operation */
+       if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
+               code = FPE_FLTINV;
+
+       /* Overflow */
+       else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
+               code = FPE_FLTOVF;
+
+       /* Underflow */
+       else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
+               code = FPE_FLTUND;
+
+       /* Divide by zero */
+       else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
+               code = FPE_FLTDIV;
+
+       /* Inexact result */
+       else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
+               code = FPE_FLTRES;
+
+       _exception(SIGFPE, regs, code, regs->nip);
+}
+
+/*
+ * Illegal instruction emulation support.  Originally written to
+ * provide the PVR to user applications using the mfspr rd, PVR.
+ * Return non-zero if we can't emulate, or -EFAULT if the associated
+ * memory access caused an access fault.  Return zero on success.
+ *
+ * There are a couple of ways to do this, either "decode" the instruction
+ * or directly match lots of bits.  In this case, matching lots of
+ * bits is faster and easier.
+ *
+ */
+#define INST_MFSPR_PVR         0x7c1f42a6
+#define INST_MFSPR_PVR_MASK    0xfc1fffff
+
+#define INST_DCBA              0x7c0005ec
+#define INST_DCBA_MASK         0x7c0007fe
+
+#define INST_MCRXR             0x7c000400
+#define INST_MCRXR_MASK                0x7c0007fe
+
+#define INST_STRING            0x7c00042a
+#define INST_STRING_MASK       0x7c0007fe
+#define INST_STRING_GEN_MASK   0x7c00067e
+#define INST_LSWI              0x7c0004aa
+#define INST_LSWX              0x7c00042a
+#define INST_STSWI             0x7c0005aa
+#define INST_STSWX             0x7c00052a
+
+static int emulate_string_inst(struct pt_regs *regs, u32 instword)
+{
+       u8 rT = (instword >> 21) & 0x1f;
+       u8 rA = (instword >> 16) & 0x1f;
+       u8 NB_RB = (instword >> 11) & 0x1f;
+       u32 num_bytes;
+       unsigned long EA;
+       int pos = 0;
+
+       /* Early out if we are an invalid form of lswx */
+       if ((instword & INST_STRING_MASK) == INST_LSWX)
+               if ((rT == rA) || (rT == NB_RB))
+                       return -EINVAL;
+
+       EA = (rA == 0) ? 0 : regs->gpr[rA];
+
+       switch (instword & INST_STRING_MASK) {
+               case INST_LSWX:
+               case INST_STSWX:
+                       EA += NB_RB;
+                       num_bytes = regs->xer & 0x7f;
+                       break;
+               case INST_LSWI:
+               case INST_STSWI:
+                       num_bytes = (NB_RB == 0) ? 32 : NB_RB;
+                       break;
+               default:
+                       return -EINVAL;
+       }
+
+       while (num_bytes != 0)
+       {
+               u8 val;
+               u32 shift = 8 * (3 - (pos & 0x3));
+
+               switch ((instword & INST_STRING_MASK)) {
+                       case INST_LSWX:
+                       case INST_LSWI:
+                               if (get_user(val, (u8 __user *)EA))
+                                       return -EFAULT;
+                               /* first time updating this reg,
+                                * zero it out */
+                               if (pos == 0)
+                                       regs->gpr[rT] = 0;
+                               regs->gpr[rT] |= val << shift;
+                               break;
+                       case INST_STSWI:
+                       case INST_STSWX:
+                               val = regs->gpr[rT] >> shift;
+                               if (put_user(val, (u8 __user *)EA))
+                                       return -EFAULT;
+                               break;
+               }
+               /* move EA to next address */
+               EA += 1;
+               num_bytes--;
+
+               /* manage our position within the register */
+               if (++pos == 4) {
+                       pos = 0;
+                       if (++rT == 32)
+                               rT = 0;
+               }
+       }
+
+       return 0;
+}
+
+static int emulate_instruction(struct pt_regs *regs)
+{
+       u32 instword;
+       u32 rd;
+
+       if (!user_mode(regs))
+               return -EINVAL;
+       CHECK_FULL_REGS(regs);
+
+       if (get_user(instword, (u32 __user *)(regs->nip)))
+               return -EFAULT;
+
+       /* Emulate the mfspr rD, PVR. */
+       if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
+               rd = (instword >> 21) & 0x1f;
+               regs->gpr[rd] = mfspr(SPRN_PVR);
+               return 0;
+       }
+
+       /* Emulating the dcba insn is just a no-op.  */
+       if ((instword & INST_DCBA_MASK) == INST_DCBA)
+               return 0;
+
+       /* Emulate the mcrxr insn.  */
+       if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
+               int shift = (instword >> 21) & 0x1c;
+               unsigned long msk = 0xf0000000UL >> shift;
+
+               regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
+               regs->xer &= ~0xf0000000UL;
+               return 0;
+       }
+
+       /* Emulate load/store string insn. */
+       if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
+               return emulate_string_inst(regs, instword);
+
+       return -EINVAL;
+}
+
+/*
+ * Look through the list of trap instructions that are used for BUG(),
+ * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
+ * that the exception was caused by a trap instruction of some kind.
+ * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
+ * otherwise.
+ */
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+
+#ifndef CONFIG_MODULES
+#define module_find_bug(x)     NULL
+#endif
+
+struct bug_entry *find_bug(unsigned long bugaddr)
+{
+       struct bug_entry *bug;
+
+       for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
+               if (bugaddr == bug->bug_addr)
+                       return bug;
+       return module_find_bug(bugaddr);
+}
+
+static int check_bug_trap(struct pt_regs *regs)
+{
+       struct bug_entry *bug;
+       unsigned long addr;
+
+       if (regs->msr & MSR_PR)
+               return 0;       /* not in kernel */
+       addr = regs->nip;       /* address of trap instruction */
+       if (addr < PAGE_OFFSET)
+               return 0;
+       bug = find_bug(regs->nip);
+       if (bug == NULL)
+               return 0;
+       if (bug->line & BUG_WARNING_TRAP) {
+               /* this is a WARN_ON rather than BUG/BUG_ON */
+#ifdef CONFIG_XMON
+               xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
+                      bug->function, bug->file,
+                      bug->line & ~BUG_WARNING_TRAP);
+#endif /* CONFIG_XMON */
+               printk(KERN_ERR "Badness in %s at %s:%d\n",
+                      bug->function, bug->file,
+                      bug->line & ~BUG_WARNING_TRAP);
+               dump_stack();
+               return 1;
+       }
+#ifdef CONFIG_XMON
+       xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+              bug->function, bug->file, bug->line);
+       xmon(regs);
+#endif /* CONFIG_XMON */
+       printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+              bug->function, bug->file, bug->line);
+
+       return 0;
+}
+
+void __kprobes program_check_exception(struct pt_regs *regs)
+{
+       unsigned int reason = get_reason(regs);
+       extern int do_mathemu(struct pt_regs *regs);
+
+#ifdef CONFIG_MATH_EMULATION
+       /* (reason & REASON_ILLEGAL) would be the obvious thing here,
+        * but there seems to be a hardware bug on the 405GP (RevD)
+        * that means ESR is sometimes set incorrectly - either to
+        * ESR_DST (!?) or 0.  In the process of chasing this with the
+        * hardware people - not sure if it can happen on any illegal
+        * instruction or only on FP instructions, whether there is a
+        * pattern to occurrences etc. -dgibson 31/Mar/2003 */
+       if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
+               emulate_single_step(regs);
+               return;
+       }
+#endif /* CONFIG_MATH_EMULATION */
+
+       if (reason & REASON_FP) {
+               /* IEEE FP exception */
+               parse_fpe(regs);
+               return;
+       }
+       if (reason & REASON_TRAP) {
+               /* trap exception */
+               if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
+                               == NOTIFY_STOP)
+                       return;
+               if (debugger_bpt(regs))
+                       return;
+               if (check_bug_trap(regs)) {
+                       regs->nip += 4;
+                       return;
+               }
+               _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+               return;
+       }
+
+       /* Try to emulate it if we should. */
+       if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
+               switch (emulate_instruction(regs)) {
+               case 0:
+                       regs->nip += 4;
+                       emulate_single_step(regs);
+                       return;
+               case -EFAULT:
+                       _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+                       return;
+               }
+       }
+
+       if (reason & REASON_PRIVILEGED)
+               _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+       else
+               _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+}
+
+void alignment_exception(struct pt_regs *regs)
+{
+       int fixed;
+
+       fixed = fix_alignment(regs);
+
+       if (fixed == 1) {
+               regs->nip += 4; /* skip over emulated instruction */
+               emulate_single_step(regs);
+               return;
+       }
+
+       /* Operand address was bad */
+       if (fixed == -EFAULT) {
+               if (user_mode(regs))
+                       _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
+               else
+                       /* Search exception table */
+                       bad_page_fault(regs, regs->dar, SIGSEGV);
+               return;
+       }
+       _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+}
+
+void StackOverflow(struct pt_regs *regs)
+{
+       printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
+              current, regs->gpr[1]);
+       debugger(regs);
+       show_regs(regs);
+       panic("kernel stack overflow");
+}
+
+void nonrecoverable_exception(struct pt_regs *regs)
+{
+       printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
+              regs->nip, regs->msr);
+       debugger(regs);
+       die("nonrecoverable exception", regs, SIGKILL);
+}
+
+void trace_syscall(struct pt_regs *regs)
+{
+       printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
+              current, current->pid, regs->nip, regs->link, regs->gpr[0],
+              regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
+}
+
+void kernel_fp_unavailable_exception(struct pt_regs *regs)
+{
+       printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
+                         "%lx at %lx\n", regs->trap, regs->nip);
+       die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+}
+
+void altivec_unavailable_exception(struct pt_regs *regs)
+{
+#if !defined(CONFIG_ALTIVEC)
+       if (user_mode(regs)) {
+               /* A user program has executed an altivec instruction,
+                  but this kernel doesn't support altivec. */
+               _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+               return;
+       }
+#endif
+       printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
+                       "%lx at %lx\n", regs->trap, regs->nip);
+       die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+}
+
+#ifdef CONFIG_PPC64
+extern perf_irq_t perf_irq;
+#endif
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_E500)
+void performance_monitor_exception(struct pt_regs *regs)
+{
+       perf_irq(regs);
+}
+#endif
+
+#ifdef CONFIG_8xx
+void SoftwareEmulation(struct pt_regs *regs)
+{
+       extern int do_mathemu(struct pt_regs *);
+       extern int Soft_emulate_8xx(struct pt_regs *);
+       int errcode;
+
+       CHECK_FULL_REGS(regs);
+
+       if (!user_mode(regs)) {
+               debugger(regs);
+               die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
+       }
+
+#ifdef CONFIG_MATH_EMULATION
+       errcode = do_mathemu(regs);
+#else
+       errcode = Soft_emulate_8xx(regs);
+#endif
+       if (errcode) {
+               if (errcode > 0)
+                       _exception(SIGFPE, regs, 0, 0);
+               else if (errcode == -EFAULT)
+                       _exception(SIGSEGV, regs, 0, 0);
+               else
+                       _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+       } else
+               emulate_single_step(regs);
+}
+#endif /* CONFIG_8xx */
+
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+
+void DebugException(struct pt_regs *regs, unsigned long debug_status)
+{
+       if (debug_status & DBSR_IC) {   /* instruction completion */
+               regs->msr &= ~MSR_DE;
+               if (user_mode(regs)) {
+                       current->thread.dbcr0 &= ~DBCR0_IC;
+               } else {
+                       /* Disable instruction completion */
+                       mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
+                       /* Clear the instruction completion event */
+                       mtspr(SPRN_DBSR, DBSR_IC);
+                       if (debugger_sstep(regs))
+                               return;
+               }
+               _exception(SIGTRAP, regs, TRAP_TRACE, 0);
+       }
+}
+#endif /* CONFIG_40x || CONFIG_BOOKE */
+
+#if !defined(CONFIG_TAU_INT)
+void TAUException(struct pt_regs *regs)
+{
+       printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
+              regs->nip, regs->msr, regs->trap, print_tainted());
+}
+#endif /* CONFIG_TAU_INT */
+
+#ifdef CONFIG_ALTIVEC
+void altivec_assist_exception(struct pt_regs *regs)
+{
+       int err;
+
+       if (!user_mode(regs)) {
+               printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
+                      " at %lx\n", regs->nip);
+               die("Kernel VMX/Altivec assist exception", regs, SIGILL);
+       }
+
+       flush_altivec_to_thread(current);
+
+       err = emulate_altivec(regs);
+       if (err == 0) {
+               regs->nip += 4;         /* skip emulated instruction */
+               emulate_single_step(regs);
+               return;
+       }
+
+       if (err == -EFAULT) {
+               /* got an error reading the instruction */
+               _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+       } else {
+               /* didn't recognize the instruction */
+               /* XXX quick hack for now: set the non-Java bit in the VSCR */
+               if (printk_ratelimit())
+                       printk(KERN_ERR "Unrecognized altivec instruction "
+                              "in %s at %lx\n", current->comm, regs->nip);
+               current->thread.vscr.u[3] |= 0x10000;
+       }
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_FSL_BOOKE
+void CacheLockingException(struct pt_regs *regs, unsigned long address,
+                          unsigned long error_code)
+{
+       /* We treat cache locking instructions from the user
+        * as priv ops, in the future we could try to do
+        * something smarter
+        */
+       if (error_code & (ESR_DLK|ESR_ILK))
+               _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+       return;
+}
+#endif /* CONFIG_FSL_BOOKE */
+
+#ifdef CONFIG_SPE
+void SPEFloatingPointException(struct pt_regs *regs)
+{
+       unsigned long spefscr;
+       int fpexc_mode;
+       int code = 0;
+
+       spefscr = current->thread.spefscr;
+       fpexc_mode = current->thread.fpexc_mode;
+
+       /* Hardware does not necessarily set sticky
+        * underflow/overflow/invalid flags */
+       if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
+               code = FPE_FLTOVF;
+               spefscr |= SPEFSCR_FOVFS;
+       }
+       else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
+               code = FPE_FLTUND;
+               spefscr |= SPEFSCR_FUNFS;
+       }
+       else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
+               code = FPE_FLTDIV;
+       else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
+               code = FPE_FLTINV;
+               spefscr |= SPEFSCR_FINVS;
+       }
+       else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
+               code = FPE_FLTRES;
+
+       current->thread.spefscr = spefscr;
+
+       _exception(SIGFPE, regs, code, regs->nip);
+       return;
+}
+#endif
+
+/*
+ * We enter here if we get an unrecoverable exception, that is, one
+ * that happened at a point where the RI (recoverable interrupt) bit
+ * in the MSR is 0.  This indicates that SRR0/1 are live, and that
+ * we therefore lost state by taking this exception.
+ */
+void unrecoverable_exception(struct pt_regs *regs)
+{
+       printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
+              regs->trap, regs->nip);
+       die("Unrecoverable exception", regs, SIGABRT);
+}
+
+#ifdef CONFIG_BOOKE_WDT
+/*
+ * Default handler for a Watchdog exception,
+ * spins until a reboot occurs
+ */
+void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
+{
+       /* Generic WatchdogHandler, implement your own */
+       mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
+       return;
+}
+
+void WatchdogException(struct pt_regs *regs)
+{
+       printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
+       WatchdogHandler(regs);
+}
+#endif
+
+/*
+ * We enter here if we discover during exception entry that we are
+ * running in supervisor mode with a userspace value in the stack pointer.
+ */
+void kernel_bad_stack(struct pt_regs *regs)
+{
+       printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
+              regs->gpr[1], regs->nip);
+       die("Bad kernel stack pointer", regs, SIGABRT);
+}
+
+void __init trap_init(void)
+{
+}
similarity index 76%
rename from arch/ppc64/kernel/vector.S
rename to arch/powerpc/kernel/vector.S
index b79d33e..66b3d03 100644 (file)
@@ -1,11 +1,26 @@
+#include <linux/config.h>
 #include <asm/ppc_asm.h>
-#include <asm/processor.h>
+#include <asm/reg.h>
 
 /*
  * The routines below are in assembler so we can closely control the
  * usage of floating-point registers.  These routines must be called
  * with preempt disabled.
  */
+#ifdef CONFIG_PPC32
+       .data
+fpzero:
+       .long   0
+fpone:
+       .long   0x3f800000      /* 1.0 in single-precision FP */
+fphalf:
+       .long   0x3f000000      /* 0.5 in single-precision FP */
+
+#define LDCONST(fr, name)      \
+       lis     r11,name@ha;    \
+       lfs     fr,name@l(r11)
+#else
+
        .section ".toc","aw"
 fpzero:
        .tc     FD_0_0[TC],0
@@ -14,32 +29,42 @@ fpone:
 fphalf:
        .tc     FD_3fe00000_0[TC],0x3fe0000000000000    /* 0.5 */
 
+#define LDCONST(fr, name)      \
+       lfd     fr,name@toc(r2)
+#endif
+
        .text
 /*
  * Internal routine to enable floating point and set FPSCR to 0.
  * Don't call it from C; it doesn't use the normal calling convention.
  */
 fpenable:
+#ifdef CONFIG_PPC32
+       stwu    r1,-64(r1)
+#else
+       stdu    r1,-64(r1)
+#endif
        mfmsr   r10
        ori     r11,r10,MSR_FP
        mtmsr   r11
        isync
-       stfd    fr31,-8(r1)
-       stfd    fr0,-16(r1)
-       stfd    fr1,-24(r1)
+       stfd    fr0,24(r1)
+       stfd    fr1,16(r1)
+       stfd    fr31,8(r1)
+       LDCONST(fr1, fpzero)
        mffs    fr31
-       lfd     fr1,fpzero@toc(r2)
        mtfsf   0xff,fr1
        blr
 
 fpdisable:
        mtlr    r12
        mtfsf   0xff,fr31
-       lfd     fr1,-24(r1)
-       lfd     fr0,-16(r1)
-       lfd     fr31,-8(r1)
+       lfd     fr31,8(r1)
+       lfd     fr1,16(r1)
+       lfd     fr0,24(r1)
        mtmsr   r10
        isync
+       addi    r1,r1,64
        blr
 
 /*
@@ -82,7 +107,7 @@ _GLOBAL(vsubfp)
 _GLOBAL(vmaddfp)
        mflr    r12
        bl      fpenable
-       stfd    fr2,-32(r1)
+       stfd    fr2,32(r1)
        li      r0,4
        mtctr   r0
        li      r7,0
@@ -93,7 +118,7 @@ _GLOBAL(vmaddfp)
        stfsx   fr0,r3,r7
        addi    r7,r7,4
        bdnz    1b
-       lfd     fr2,-32(r1)
+       lfd     fr2,32(r1)
        b       fpdisable
 
 /*
@@ -102,7 +127,7 @@ _GLOBAL(vmaddfp)
 _GLOBAL(vnmsubfp)
        mflr    r12
        bl      fpenable
-       stfd    fr2,-32(r1)
+       stfd    fr2,32(r1)
        li      r0,4
        mtctr   r0
        li      r7,0
@@ -113,7 +138,7 @@ _GLOBAL(vnmsubfp)
        stfsx   fr0,r3,r7
        addi    r7,r7,4
        bdnz    1b
-       lfd     fr2,-32(r1)
+       lfd     fr2,32(r1)
        b       fpdisable
 
 /*
@@ -124,7 +149,7 @@ _GLOBAL(vrefp)
        mflr    r12
        bl      fpenable
        li      r0,4
-       lfd     fr1,fpone@toc(r2)
+       LDCONST(fr1, fpone)
        mtctr   r0
        li      r6,0
 1:     lfsx    fr0,r4,r6
@@ -143,13 +168,13 @@ _GLOBAL(vrefp)
 _GLOBAL(vrsqrtefp)
        mflr    r12
        bl      fpenable
-       stfd    fr2,-32(r1)
-       stfd    fr3,-40(r1)
-       stfd    fr4,-48(r1)
-       stfd    fr5,-56(r1)
+       stfd    fr2,32(r1)
+       stfd    fr3,40(r1)
+       stfd    fr4,48(r1)
+       stfd    fr5,56(r1)
        li      r0,4
-       lfd     fr4,fpone@toc(r2)
-       lfd     fr5,fphalf@toc(r2)
+       LDCONST(fr4, fpone)
+       LDCONST(fr5, fphalf)
        mtctr   r0
        li      r6,0
 1:     lfsx    fr0,r4,r6
@@ -165,8 +190,8 @@ _GLOBAL(vrsqrtefp)
        stfsx   fr1,r3,r6
        addi    r6,r6,4
        bdnz    1b
-       lfd     fr5,-56(r1)
-       lfd     fr4,-48(r1)
-       lfd     fr3,-40(r1)
-       lfd     fr2,-32(r1)
+       lfd     fr5,56(r1)
+       lfd     fr4,48(r1)
+       lfd     fr3,40(r1)
+       lfd     fr2,32(r1)
        b       fpdisable
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..d4dfcfb
--- /dev/null
@@ -0,0 +1,279 @@
+#include <linux/config.h>
+#ifdef CONFIG_PPC64
+#include <asm/page.h>
+#else
+#define PAGE_SIZE      4096
+#endif
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_PPC64
+OUTPUT_ARCH(powerpc:common64)
+jiffies = jiffies_64;
+#else
+OUTPUT_ARCH(powerpc:common)
+jiffies = jiffies_64 + 4;
+#endif
+SECTIONS
+{
+  /* Sections to be discarded. */
+  /DISCARD/ : {
+    *(.exitcall.exit)
+    *(.exit.data)
+  }
+
+
+  /* Read-only sections, merged into text segment: */
+#ifdef CONFIG_PPC32
+  . = + SIZEOF_HEADERS;
+  .interp : { *(.interp) }
+  .hash          : { *(.hash)          }
+  .dynsym        : { *(.dynsym)                }
+  .dynstr        : { *(.dynstr)                }
+  .rel.text      : { *(.rel.text)              }
+  .rela.text     : { *(.rela.text)     }
+  .rel.data      : { *(.rel.data)              }
+  .rela.data     : { *(.rela.data)     }
+  .rel.rodata    : { *(.rel.rodata)    }
+  .rela.rodata   : { *(.rela.rodata)   }
+  .rel.got       : { *(.rel.got)               }
+  .rela.got      : { *(.rela.got)              }
+  .rel.ctors     : { *(.rel.ctors)     }
+  .rela.ctors    : { *(.rela.ctors)    }
+  .rel.dtors     : { *(.rel.dtors)     }
+  .rela.dtors    : { *(.rela.dtors)    }
+  .rel.bss       : { *(.rel.bss)               }
+  .rela.bss      : { *(.rela.bss)              }
+  .rel.plt       : { *(.rel.plt)               }
+  .rela.plt      : { *(.rela.plt)              }
+/*  .init          : { *(.init)        } =0*/
+  .plt : { *(.plt) }
+#endif
+  .text : {
+    *(.text .text.*)
+    SCHED_TEXT
+    LOCK_TEXT
+    KPROBES_TEXT
+    *(.fixup)
+#ifdef CONFIG_PPC32
+    *(.got1)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+#else
+    . = ALIGN(PAGE_SIZE);
+    _etext = .;
+#endif
+  }
+#ifdef CONFIG_PPC32
+  _etext = .;
+  PROVIDE (etext = .);
+
+  RODATA
+  .fini      : { *(.fini)    } =0
+  .ctors     : { *(.ctors)   }
+  .dtors     : { *(.dtors)   }
+
+  .fixup   : { *(.fixup) }
+#endif
+
+       __ex_table : {
+               __start___ex_table = .;
+               *(__ex_table)
+               __stop___ex_table = .;
+       }
+
+       __bug_table : {
+               __start___bug_table = .;
+               *(__bug_table)
+               __stop___bug_table = .;
+       }
+
+#ifdef CONFIG_PPC64
+       __ftr_fixup : {
+               __start___ftr_fixup = .;
+               *(__ftr_fixup)
+               __stop___ftr_fixup = .;
+       }
+
+  RODATA
+#endif
+
+#ifdef CONFIG_PPC32
+  /* Read-write section, merged into data segment: */
+  . = ALIGN(PAGE_SIZE);
+  _sdata = .;
+  .data    :
+  {
+    *(.data)
+    *(.data1)
+    *(.sdata)
+    *(.sdata2)
+    *(.got.plt) *(.got)
+    *(.dynamic)
+    CONSTRUCTORS
+  }
+
+  . = ALIGN(PAGE_SIZE);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(PAGE_SIZE);
+  __nosave_end = .;
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata  =  .;
+  PROVIDE (edata = .);
+
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
+#endif
+
+  /* will be freed after init */
+  . = ALIGN(PAGE_SIZE);
+  __init_begin = .;
+  .init.text : {
+       _sinittext = .;
+       *(.init.text)
+       _einittext = .;
+  }
+#ifdef CONFIG_PPC32
+  /* .exit.text is discarded at runtime, not link time,
+     to deal with references from __bug_table */
+  .exit.text : { *(.exit.text) }
+#endif
+  .init.data : {
+    *(.init.data);
+    __vtop_table_begin = .;
+    *(.vtop_fixup);
+    __vtop_table_end = .;
+    __ptov_table_begin = .;
+    *(.ptov_fixup);
+    __ptov_table_end = .;
+  }
+
+  . = ALIGN(16);
+  .init.setup : {
+    __setup_start = .;
+    *(.init.setup)
+    __setup_end = .;
+  }
+
+  .initcall.init : {
+       __initcall_start = .;
+       *(.initcall1.init)
+       *(.initcall2.init)
+       *(.initcall3.init)
+       *(.initcall4.init)
+       *(.initcall5.init)
+       *(.initcall6.init)
+       *(.initcall7.init)
+       __initcall_end = .;
+  }
+
+  .con_initcall.init : {
+    __con_initcall_start = .;
+    *(.con_initcall.init)
+    __con_initcall_end = .;
+  }
+
+  SECURITY_INIT
+
+#ifdef CONFIG_PPC32
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+#else
+  . = ALIGN(PAGE_SIZE);
+  .init.ramfs : {
+    __initramfs_start = .;
+    *(.init.ramfs)
+    __initramfs_end = .;
+  }
+#endif
+
+#ifdef CONFIG_PPC32
+  . = ALIGN(32);
+#endif
+  .data.percpu : {
+    __per_cpu_start = .;
+    *(.data.percpu)
+    __per_cpu_end = .;
+  }
+
+ . = ALIGN(PAGE_SIZE);
+#ifdef CONFIG_PPC64
+ . = ALIGN(16384);
+ __init_end = .;
+ /* freed after init ends here */
+
+ /* Read/write sections */
+ . = ALIGN(PAGE_SIZE);
+ . = ALIGN(16384);
+ _sdata = .;
+ /* The initial task and kernel stack */
+ .data.init_task : {
+      *(.data.init_task)
+      }
+
+ . = ALIGN(PAGE_SIZE);
+ .data.page_aligned : {
+      *(.data.page_aligned)
+      }
+
+ .data.cacheline_aligned : {
+      *(.data.cacheline_aligned)
+      }
+
+ .data : {
+      *(.data .data.rel* .toc1)
+      *(.branch_lt)
+      }
+
+ .opd : {
+      *(.opd)
+      }
+
+ .got : {
+      __toc_start = .;
+      *(.got)
+      *(.toc)
+      . = ALIGN(PAGE_SIZE);
+      _edata = .;
+      }
+
+  . = ALIGN(PAGE_SIZE);
+#else
+  __initramfs_start = .;
+  .init.ramfs : {
+    *(.init.ramfs)
+  }
+  __initramfs_end = .;
+
+  . = ALIGN(4096);
+  __init_end = .;
+
+  . = ALIGN(4096);
+  _sextratext = .;
+  _eextratext = .;
+
+  __bss_start = .;
+#endif
+
+  .bss : {
+    __bss_start = .;
+   *(.sbss) *(.scommon)
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  __bss_stop = .;
+  }
+
+#ifdef CONFIG_PPC64
+  . = ALIGN(PAGE_SIZE);
+#endif
+  _end = . ;
+#ifdef CONFIG_PPC32
+  PROVIDE (end = .);
+#endif
+}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
new file mode 100644 (file)
index 0000000..30367a0
--- /dev/null
@@ -0,0 +1,13 @@
+#
+# Makefile for ppc-specific library files..
+#
+
+obj-y                  := strcase.o string.o
+obj-$(CONFIG_PPC32)    += div64.o copy_32.o checksum_32.o
+obj-$(CONFIG_PPC64)    += copypage_64.o copyuser_64.o memcpy_64.o \
+                          usercopy_64.o sstep.o checksum_64.o mem_64.o
+obj-$(CONFIG_PPC_ISERIES) += e2a.o
+ifeq ($(CONFIG_PPC64),y)
+obj-$(CONFIG_SMP)      += locks.o
+endif
+
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
new file mode 100644 (file)
index 0000000..7874e8a
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * This file contains assembly-language implementations
+ * of IP-style 1's complement checksum routines.
+ *     
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+#include <linux/sys.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+       .text
+
+/*
+ * ip_fast_csum(buf, len) -- Optimized for IP header
+ * len is in words and is always >= 5.
+ */
+_GLOBAL(ip_fast_csum)
+       lwz     r0,0(r3)
+       lwzu    r5,4(r3)
+       addic.  r4,r4,-2
+       addc    r0,r0,r5
+       mtctr   r4
+       blelr-
+1:     lwzu    r4,4(r3)
+       adde    r0,r0,r4
+       bdnz    1b
+       addze   r0,r0           /* add in final carry */
+       rlwinm  r3,r0,16,0,31   /* fold two halves together */
+       add     r3,r0,r3
+       not     r3,r3
+       srwi    r3,r3,16
+       blr
+
+/*
+ * Compute checksum of TCP or UDP pseudo-header:
+ *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
+ */    
+_GLOBAL(csum_tcpudp_magic)
+       rlwimi  r5,r6,16,0,15   /* put proto in upper half of len */
+       addc    r0,r3,r4        /* add 4 32-bit words together */
+       adde    r0,r0,r5
+       adde    r0,r0,r7
+       addze   r0,r0           /* add in final carry */
+       rlwinm  r3,r0,16,0,31   /* fold two halves together */
+       add     r3,r0,r3
+       not     r3,r3
+       srwi    r3,r3,16
+       blr
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * csum_partial(buff, len, sum)
+ */
+_GLOBAL(csum_partial)
+       addic   r0,r5,0
+       subi    r3,r3,4
+       srwi.   r6,r4,2
+       beq     3f              /* if we're doing < 4 bytes */
+       andi.   r5,r3,2         /* Align buffer to longword boundary */
+       beq+    1f
+       lhz     r5,4(r3)        /* do 2 bytes to get aligned */
+       addi    r3,r3,2
+       subi    r4,r4,2
+       addc    r0,r0,r5
+       srwi.   r6,r4,2         /* # words to do */
+       beq     3f
+1:     mtctr   r6
+2:     lwzu    r5,4(r3)        /* the bdnz has zero overhead, so it should */
+       adde    r0,r0,r5        /* be unnecessary to unroll this loop */
+       bdnz    2b
+       andi.   r4,r4,3
+3:     cmpwi   0,r4,2
+       blt+    4f
+       lhz     r5,4(r3)
+       addi    r3,r3,2
+       subi    r4,r4,2
+       adde    r0,r0,r5
+4:     cmpwi   0,r4,1
+       bne+    5f
+       lbz     r5,4(r3)
+       slwi    r5,r5,8         /* Upper byte of word */
+       adde    r0,r0,r5
+5:     addze   r3,r0           /* add in final carry */
+       blr
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively, and (for an error on
+ * src) zeroes the rest of dst.
+ *
+ * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
+ */
+_GLOBAL(csum_partial_copy_generic)
+       addic   r0,r6,0
+       subi    r3,r3,4
+       subi    r4,r4,4
+       srwi.   r6,r5,2
+       beq     3f              /* if we're doing < 4 bytes */
+       andi.   r9,r4,2         /* Align dst to longword boundary */
+       beq+    1f
+81:    lhz     r6,4(r3)        /* do 2 bytes to get aligned */
+       addi    r3,r3,2
+       subi    r5,r5,2
+91:    sth     r6,4(r4)
+       addi    r4,r4,2
+       addc    r0,r0,r6
+       srwi.   r6,r5,2         /* # words to do */
+       beq     3f
+1:     srwi.   r6,r5,4         /* # groups of 4 words to do */
+       beq     10f
+       mtctr   r6
+71:    lwz     r6,4(r3)
+72:    lwz     r9,8(r3)
+73:    lwz     r10,12(r3)
+74:    lwzu    r11,16(r3)
+       adde    r0,r0,r6
+75:    stw     r6,4(r4)
+       adde    r0,r0,r9
+76:    stw     r9,8(r4)
+       adde    r0,r0,r10
+77:    stw     r10,12(r4)
+       adde    r0,r0,r11
+78:    stwu    r11,16(r4)
+       bdnz    71b
+10:    rlwinm. r6,r5,30,30,31  /* # words left to do */
+       beq     13f
+       mtctr   r6
+82:    lwzu    r9,4(r3)
+92:    stwu    r9,4(r4)
+       adde    r0,r0,r9
+       bdnz    82b
+13:    andi.   r5,r5,3
+3:     cmpwi   0,r5,2
+       blt+    4f
+83:    lhz     r6,4(r3)
+       addi    r3,r3,2
+       subi    r5,r5,2
+93:    sth     r6,4(r4)
+       addi    r4,r4,2
+       adde    r0,r0,r6
+4:     cmpwi   0,r5,1
+       bne+    5f
+84:    lbz     r6,4(r3)
+94:    stb     r6,4(r4)
+       slwi    r6,r6,8         /* Upper byte of word */
+       adde    r0,r0,r6
+5:     addze   r3,r0           /* add in final carry */
+       blr
+
+/* These shouldn't go in the fixup section, since that would
+   cause the ex_table addresses to get out of order. */
+
+src_error_4:
+       mfctr   r6              /* update # bytes remaining from ctr */
+       rlwimi  r5,r6,4,0,27
+       b       79f
+src_error_1:
+       li      r6,0
+       subi    r5,r5,2
+95:    sth     r6,4(r4)
+       addi    r4,r4,2
+79:    srwi.   r6,r5,2
+       beq     3f
+       mtctr   r6
+src_error_2:
+       li      r6,0
+96:    stwu    r6,4(r4)
+       bdnz    96b
+3:     andi.   r5,r5,3
+       beq     src_error
+src_error_3:
+       li      r6,0
+       mtctr   r5
+       addi    r4,r4,3
+97:    stbu    r6,1(r4)
+       bdnz    97b
+src_error:
+       cmpwi   0,r7,0
+       beq     1f
+       li      r6,-EFAULT
+       stw     r6,0(r7)
+1:     addze   r3,r0
+       blr
+
+dst_error:
+       cmpwi   0,r8,0
+       beq     1f
+       li      r6,-EFAULT
+       stw     r6,0(r8)
+1:     addze   r3,r0
+       blr
+
+.section __ex_table,"a"
+       .long   81b,src_error_1
+       .long   91b,dst_error
+       .long   71b,src_error_4
+       .long   72b,src_error_4
+       .long   73b,src_error_4
+       .long   74b,src_error_4
+       .long   75b,dst_error
+       .long   76b,dst_error
+       .long   77b,dst_error
+       .long   78b,dst_error
+       .long   82b,src_error_2
+       .long   92b,dst_error
+       .long   83b,src_error_3
+       .long   93b,dst_error
+       .long   84b,src_error_3
+       .long   94b,dst_error
+       .long   95b,dst_error
+       .long   96b,dst_error
+       .long   97b,dst_error
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
new file mode 100644 (file)
index 0000000..ef96c6c
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * This file contains assembly-language implementations
+ * of IP-style 1's complement checksum routines.
+ *     
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+#include <linux/sys.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+/*
+ * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
+ * len is in words and is always >= 5.
+ *
+ * In practice len == 5, but this is not guaranteed.  So this code does not
+ * attempt to use doubleword instructions.
+ */
+_GLOBAL(ip_fast_csum)
+       lwz     r0,0(r3)        /* load first word of header */
+       lwzu    r5,4(r3)        /* load second word, advance pointer */
+       addic.  r4,r4,-2        /* len -= 2 (two words already loaded) */
+       addc    r0,r0,r5        /* start the sum, carry into CA */
+       mtctr   r4              /* remaining word count -> ctr */
+       blelr-                  /* len <= 2: not expected (len >= 5 per above) */
+1:     lwzu    r4,4(r3)        /* sum the remaining words ... */
+       adde    r0,r0,r4        /* ... carrying via CA */
+       bdnz    1b
+       addze   r0,r0           /* add in final carry */
+        rldicl  r4,r0,32,0      /* fold two 32-bit halves together */
+        add     r0,r0,r4
+        srdi    r0,r0,32
+       rlwinm  r3,r0,16,0,31   /* fold two halves together */
+       add     r3,r0,r3
+       not     r3,r3           /* 1's complement */
+       srwi    r3,r3,16        /* 16-bit checksum in low half of r3 */
+       blr
+
+/*
+ * Compute checksum of TCP or UDP pseudo-header:
+ *   csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
+ * No real gain trying to do this specially for 64 bit, but
+ * the 32 bit addition may spill into the upper bits of
+ * the doubleword so we still must fold it down from 64.
+ */    
+_GLOBAL(csum_tcpudp_magic)
+       rlwimi  r5,r6,16,0,15   /* put proto in upper half of len */
+       addc    r0,r3,r4        /* add 4 32-bit words together */
+       adde    r0,r0,r5
+       adde    r0,r0,r7        /* fold in the incoming sum */
+        rldicl  r4,r0,32,0      /* fold 64 bit value */
+        add     r0,r4,r0
+        srdi    r0,r0,32
+       rlwinm  r3,r0,16,0,31   /* fold two halves together */
+       add     r3,r0,r3
+       not     r3,r3           /* 1's complement */
+       srwi    r3,r3,16        /* 16-bit checksum in low half of r3 */
+       blr
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit).
+ *
+ * This code assumes at least halfword alignment, though the length
+ * can be any number of bytes.  The sum is accumulated in r5.
+ *
+ * csum_partial(r3=buff, r4=len, r5=sum)
+ */
+_GLOBAL(csum_partial)
+        subi   r3,r3,8         /* we'll offset by 8 for the loads */
+        srdi.  r6,r4,3         /* divide by 8 for doubleword count */
+        addic   r5,r5,0         /* clear carry */
+        beq    3f              /* if we're doing < 8 bytes */
+        andi.  r0,r3,2         /* aligned on a word boundary already? */
+        beq+   1f
+        lhz     r6,8(r3)        /* do 2 bytes to get aligned */
+        addi    r3,r3,2
+        subi    r4,r4,2
+        addc    r5,r5,r6        /* restart the carry chain with this halfword */
+        srdi.   r6,r4,3         /* recompute number of doublewords */
+        beq     3f              /* any left? */
+1:      mtctr   r6
+2:      ldu     r6,8(r3)        /* main sum loop */
+        adde    r5,r5,r6
+        bdnz    2b
+        andi.  r4,r4,7         /* compute bytes left to sum after doublewords */
+3:     cmpwi   0,r4,4          /* is at least a full word left? */
+       blt     4f
+       lwz     r6,8(r3)        /* sum this word */
+       addi    r3,r3,4
+       subi    r4,r4,4
+       adde    r5,r5,r6
+4:     cmpwi   0,r4,2          /* is at least a halfword left? */
+        blt+   5f
+        lhz     r6,8(r3)        /* sum this halfword */
+        addi    r3,r3,2
+        subi    r4,r4,2
+        adde    r5,r5,r6
+5:     cmpwi   0,r4,1          /* is at least a byte left? */
+        bne+    6f
+        lbz     r6,8(r3)        /* sum this byte */
+        slwi    r6,r6,8         /* this byte is assumed to be the upper byte of a halfword */
+        adde    r5,r5,r6
+6:      addze  r5,r5           /* add in final carry */
+       rldicl  r4,r5,32,0      /* fold two 32-bit halves together */
+        add     r3,r4,r5
+        srdi    r3,r3,32        /* return folded 32-bit sum in r3 */
+        blr
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively, and (for an error on
+ * src) zeroes the rest of dst.
+ *
+ * This code needs to be reworked to take advantage of 64 bit sum+copy.
+ * However, due to tokenring halfword alignment problems this will be very
+ * tricky.  For now we'll leave it until we instrument it somehow.
+ *
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ */
+_GLOBAL(csum_partial_copy_generic)
+       addic   r0,r6,0         /* r0 = incoming sum; adding 0 clears CA */
+       subi    r3,r3,4         /* pre-bias src for the 4(rX) update forms */
+       subi    r4,r4,4         /* pre-bias dst likewise */
+       srwi.   r6,r5,2         /* # whole words */
+       beq     3f              /* if we're doing < 4 bytes */
+       andi.   r9,r4,2         /* Align dst to longword boundary */
+       beq+    1f
+81:    lhz     r6,4(r3)        /* do 2 bytes to get aligned */
+       addi    r3,r3,2
+       subi    r5,r5,2
+91:    sth     r6,4(r4)
+       addi    r4,r4,2
+       addc    r0,r0,r6
+       srwi.   r6,r5,2         /* # words to do */
+       beq     3f
+1:     mtctr   r6
+82:    lwzu    r6,4(r3)        /* the bdnz has zero overhead, so it should */
+92:    stwu    r6,4(r4)        /* be unnecessary to unroll this loop */
+       adde    r0,r0,r6
+       bdnz    82b
+       andi.   r5,r5,3         /* trailing bytes */
+3:     cmpwi   0,r5,2
+       blt+    4f
+83:    lhz     r6,4(r3)        /* copy+sum a trailing halfword */
+       addi    r3,r3,2
+       subi    r5,r5,2
+93:    sth     r6,4(r4)
+       addi    r4,r4,2
+       adde    r0,r0,r6
+4:     cmpwi   0,r5,1
+       bne+    5f
+84:    lbz     r6,4(r3)        /* copy+sum a trailing byte */
+94:    stb     r6,4(r4)
+       slwi    r6,r6,8         /* Upper byte of word */
+       adde    r0,r0,r6
+5:     addze   r3,r0           /* add in final carry (unlikely with 64-bit regs) */
+        rldicl  r4,r3,32,0      /* fold 64 bit value */
+        add     r3,r4,r3
+        srdi    r3,r3,32
+       blr
+
+/* These shouldn't go in the fixup section, since that would
+   cause the ex_table addresses to get out of order. */
+
+       .globl src_error_1
+src_error_1:
+       li      r6,0            /* src faulted: zero-fill the rest of dst */
+       subi    r5,r5,2
+95:    sth     r6,4(r4)
+       addi    r4,r4,2
+       srwi.   r6,r5,2
+       beq     3f
+       mtctr   r6
+       .globl src_error_2
+src_error_2:
+       li      r6,0
+96:    stwu    r6,4(r4)        /* zero remaining whole words of dst */
+       bdnz    96b
+3:     andi.   r5,r5,3
+       beq     src_error
+       .globl src_error_3
+src_error_3:
+       li      r6,0
+       mtctr   r5
+       addi    r4,r4,3
+97:    stbu    r6,1(r4)        /* zero remaining bytes of dst */
+       bdnz    97b
+       .globl src_error
+src_error:
+       cmpdi   0,r7,0          /* have a src_err pointer? */
+       beq     1f
+       li      r6,-EFAULT
+       stw     r6,0(r7)
+1:     addze   r3,r0           /* return the checksum accumulated so far */
+       blr
+
+       .globl dst_error
+dst_error:
+       cmpdi   0,r8,0          /* have a dst_err pointer? */
+       beq     1f
+       li      r6,-EFAULT
+       stw     r6,0(r8)
+1:     addze   r3,r0           /* return the checksum accumulated so far */
+       blr
+
+.section __ex_table,"a"
+       .align  3               /* doubleword (8-byte) entries */
+       .llong  81b,src_error_1
+       .llong  91b,dst_error
+       .llong  82b,src_error_2
+       .llong  92b,dst_error
+       .llong  83b,src_error_3
+       .llong  93b,dst_error
+       .llong  84b,src_error_3
+       .llong  94b,dst_error
+       .llong  95b,dst_error
+       .llong  96b,dst_error
+       .llong  97b,dst_error
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
new file mode 100644 (file)
index 0000000..bee5141
--- /dev/null
@@ -0,0 +1,543 @@
+/*
+ * Memory copy functions for 32-bit PowerPC.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+#define COPY_16_BYTES          \
+       lwz     r7,4(r4);       \
+       lwz     r8,8(r4);       \
+       lwz     r9,12(r4);      \
+       lwzu    r10,16(r4);     \
+       stw     r7,4(r6);       \
+       stw     r8,8(r6);       \
+       stw     r9,12(r6);      \
+       stwu    r10,16(r6)      /* 4 loads then 4 stores; r4 and r6 each advance 16 */
+
+#define COPY_16_BYTES_WITHEX(n)        \
+8 ## n ## 0:                   \
+       lwz     r7,4(r4);       \
+8 ## n ## 1:                   \
+       lwz     r8,8(r4);       \
+8 ## n ## 2:                   \
+       lwz     r9,12(r4);      \
+8 ## n ## 3:                   \
+       lwzu    r10,16(r4);     \
+8 ## n ## 4:                   \
+       stw     r7,4(r6);       \
+8 ## n ## 5:                   \
+       stw     r8,8(r6);       \
+8 ## n ## 6:                   \
+       stw     r9,12(r6);      \
+8 ## n ## 7:                   \
+       stwu    r10,16(r6)      /* as COPY_16_BYTES, but every insn is labelled 8n0..8n7 for fault fixup */
+
+#define COPY_16_BYTES_EXCODE(n)                        \
+9 ## n ## 0:                                   \
+       addi    r5,r5,-(16 * n);                \
+       b       104f;                           \
+9 ## n ## 1:                                   \
+       addi    r5,r5,-(16 * n);                \
+       b       105f;                           \
+.section __ex_table,"a";                       \
+       .align  2;                              \
+       .long   8 ## n ## 0b,9 ## n ## 0b;      \
+       .long   8 ## n ## 1b,9 ## n ## 0b;      \
+       .long   8 ## n ## 2b,9 ## n ## 0b;      \
+       .long   8 ## n ## 3b,9 ## n ## 0b;      \
+       .long   8 ## n ## 4b,9 ## n ## 1b;      \
+       .long   8 ## n ## 5b,9 ## n ## 1b;      \
+       .long   8 ## n ## 6b,9 ## n ## 1b;      \
+       .long   8 ## n ## 7b,9 ## n ## 1b;      \
+       .text           /* fixup stubs adjust r5, branch to 104f (load) or 105f (store), then back to .text */
+
+       .text
+       /* stabs source info for debuggers: directory + file name */
+       .stabs  "arch/powerpc/lib/",N_SO,0,0,0f
+       .stabs  "copy_32.S",N_SO,0,0,0f /* was "copy32.S" — must match the actual file name */
+0:
+
+CACHELINE_BYTES = L1_CACHE_BYTES
+LG_CACHELINE_BYTES = L1_CACHE_SHIFT
+CACHELINE_MASK = (L1_CACHE_BYTES-1)
+
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero.  This requires that the destination
+ * area is cacheable.  -- paulus
+ */
+_GLOBAL(cacheable_memzero)
+       mr      r5,r4           /* r5 = byte count */
+       li      r4,0            /* r4 = value to store (zero) */
+       addi    r6,r3,-4        /* pre-bias dst for stwu 4(r6) */
+       cmplwi  0,r5,4
+       blt     7f              /* < 4 bytes: byte loop only */
+       stwu    r4,4(r6)        /* store first (possibly unaligned) word */
+       beqlr                   /* exactly 4 bytes: done */
+       andi.   r0,r6,3         /* misalignment of dst */
+       add     r5,r0,r5        /* re-add overlapped bytes to the count */
+       subf    r6,r0,r6        /* word-align r6 */
+       clrlwi  r7,r6,32-LG_CACHELINE_BYTES     /* offset within cache line */
+       add     r8,r7,r5
+       srwi    r9,r8,LG_CACHELINE_BYTES
+       addic.  r9,r9,-1        /* total number of complete cachelines */
+       ble     2f
+       xori    r0,r7,CACHELINE_MASK & ~3
+       srwi.   r0,r0,2         /* words up to the first cache line boundary */
+       beq     3f
+       mtctr   r0
+4:     stwu    r4,4(r6)
+       bdnz    4b
+3:     mtctr   r9
+       li      r7,4
+#if !defined(CONFIG_8xx)
+10:    dcbz    r7,r6           /* zero a whole cache line in one shot */
+#else
+10:    stw     r4, 4(r6)       /* 8xx: store the line explicitly instead of dcbz */
+       stw     r4, 8(r6)
+       stw     r4, 12(r6)
+       stw     r4, 16(r6)
+#if CACHE_LINE_SIZE >= 32      /* NOTE(review): uses CACHE_LINE_SIZE while the rest of this file uses L1_CACHE_BYTES — verify asm/cache.h defines it */
+       stw     r4, 20(r6)
+       stw     r4, 24(r6)
+       stw     r4, 28(r6)
+       stw     r4, 32(r6)
+#endif /* CACHE_LINE_SIZE */
+#endif
+       addi    r6,r6,CACHELINE_BYTES
+       bdnz    10b
+       clrlwi  r5,r8,32-LG_CACHELINE_BYTES     /* bytes left after the last full line */
+       addi    r5,r5,4
+2:     srwi    r0,r5,2         /* remaining whole words */
+       mtctr   r0
+       bdz     6f
+1:     stwu    r4,4(r6)
+       bdnz    1b
+6:     andi.   r5,r5,3         /* remaining bytes */
+7:     cmpwi   0,r5,0
+       beqlr
+       mtctr   r5
+       addi    r6,r6,3         /* pre-bias for stbu 1(r6) */
+8:     stbu    r4,1(r6)
+       bdnz    8b
+       blr
+
+_GLOBAL(memset)
+       rlwimi  r4,r4,8,16,23   /* replicate the fill byte into ... */
+       rlwimi  r4,r4,16,0,15   /* ... all 4 byte lanes of the word */
+       addi    r6,r3,-4        /* pre-bias dst for stwu 4(r6) */
+       cmplwi  0,r5,4
+       blt     7f              /* < 4 bytes: byte loop only */
+       stwu    r4,4(r6)        /* store first (possibly unaligned) word */
+       beqlr                   /* exactly 4 bytes: done */
+       andi.   r0,r6,3         /* word-align dst, re-adding the overlap */
+       add     r5,r0,r5
+       subf    r6,r0,r6
+       srwi    r0,r5,2         /* whole words */
+       mtctr   r0
+       bdz     6f
+1:     stwu    r4,4(r6)
+       bdnz    1b
+6:     andi.   r5,r5,3         /* trailing bytes */
+7:     cmpwi   0,r5,0
+       beqlr
+       mtctr   r5
+       addi    r6,r6,3         /* pre-bias for stbu 1(r6) */
+8:     stbu    r4,1(r6)
+       bdnz    8b
+       blr
+
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic.  This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
+_GLOBAL(cacheable_memcpy)
+       add     r7,r3,r5                /* test if the src & dst overlap */
+       add     r8,r4,r5
+       cmplw   0,r4,r7
+       cmplw   1,r3,r8
+       crand   0,0,4                   /* cr0.lt &= cr1.lt */
+       blt     memcpy                  /* if regions overlap */
+
+       addi    r4,r4,-4                /* pre-bias src and dst for update forms */
+       addi    r6,r3,-4
+       neg     r0,r3
+       andi.   r0,r0,CACHELINE_MASK    /* # bytes to start of cache line */
+       beq     58f
+
+       cmplw   0,r5,r0                 /* is this more than total to do? */
+       blt     63f                     /* if not much to do */
+       andi.   r8,r0,3                 /* get it word-aligned first */
+       subf    r5,r0,r5                /* len -= alignment bytes */
+       mtctr   r8
+       beq+    61f
+70:    lbz     r9,4(r4)                /* do some bytes */
+       stb     r9,4(r6)
+       addi    r4,r4,1
+       addi    r6,r6,1
+       bdnz    70b
+61:    srwi.   r0,r0,2
+       mtctr   r0
+       beq     58f
+72:    lwzu    r9,4(r4)                /* do some words */
+       stwu    r9,4(r6)
+       bdnz    72b
+
+58:    srwi.   r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+       clrlwi  r5,r5,32-LG_CACHELINE_BYTES
+       li      r11,4
+       mtctr   r0
+       beq     63f
+53:
+#if !defined(CONFIG_8xx)
+       dcbz    r11,r6                  /* zero the dest line first (see header) */
+#endif
+       COPY_16_BYTES
+#if L1_CACHE_BYTES >= 32
+       COPY_16_BYTES
+#if L1_CACHE_BYTES >= 64
+       COPY_16_BYTES
+       COPY_16_BYTES
+#if L1_CACHE_BYTES >= 128
+       COPY_16_BYTES
+       COPY_16_BYTES
+       COPY_16_BYTES
+       COPY_16_BYTES
+#endif
+#endif
+#endif
+       bdnz    53b
+
+63:    srwi.   r0,r5,2                 /* leftover whole words */
+       mtctr   r0
+       beq     64f
+30:    lwzu    r0,4(r4)
+       stwu    r0,4(r6)
+       bdnz    30b
+
+64:    andi.   r0,r5,3                 /* leftover bytes */
+       mtctr   r0
+       beq+    65f
+40:    lbz     r0,4(r4)
+       stb     r0,4(r6)
+       addi    r4,r4,1
+       addi    r6,r6,1
+       bdnz    40b
+65:    blr
+
+_GLOBAL(memmove)
+       cmplw   0,r3,r4
+       bgt     backwards_memcpy        /* dst above src: copy high-to-low for overlap */
+       /* fall through */
+
+_GLOBAL(memcpy)
+       srwi.   r7,r5,3         /* r7 = # of 8-byte chunks */
+       addi    r6,r3,-4        /* pre-bias dst for update forms */
+       addi    r4,r4,-4        /* pre-bias src */
+       beq     2f                      /* if less than 8 bytes to do */
+       andi.   r0,r6,3                 /* get dest word aligned */
+       mtctr   r7
+       bne     5f
+1:     lwz     r7,4(r4)        /* copy 8 bytes per iteration */
+       lwzu    r8,8(r4)
+       stw     r7,4(r6)
+       stwu    r8,8(r6)
+       bdnz    1b
+       andi.   r5,r5,7         /* leftover bytes */
+2:     cmplwi  0,r5,4
+       blt     3f
+       lwzu    r0,4(r4)        /* one more whole word */
+       addi    r5,r5,-4
+       stwu    r0,4(r6)
+3:     cmpwi   0,r5,0
+       beqlr
+       mtctr   r5
+       addi    r4,r4,3         /* pre-bias for the byte loop */
+       addi    r6,r6,3
+4:     lbzu    r0,1(r4)
+       stbu    r0,1(r6)
+       bdnz    4b
+       blr
+5:     subfic  r0,r0,4         /* bytes needed to word-align dst */
+       mtctr   r0
+6:     lbz     r7,4(r4)
+       addi    r4,r4,1
+       stb     r7,4(r6)
+       addi    r6,r6,1
+       bdnz    6b
+       subf    r5,r0,r5
+       rlwinm. r7,r5,32-3,3,31 /* r7 = remaining len >> 3 */
+       beq     2b
+       mtctr   r7
+       b       1b
+
+_GLOBAL(backwards_memcpy)
+       rlwinm. r7,r5,32-3,3,31         /* r7 = r5 >> 3 (was miscommented as r0) */
+       add     r6,r3,r5                /* point just past the end of dst */
+       add     r4,r4,r5                /* and of src */
+       beq     2f
+       andi.   r0,r6,3
+       mtctr   r7
+       bne     5f
+1:     lwz     r7,-4(r4)               /* copy 8 bytes per iteration, descending */
+       lwzu    r8,-8(r4)
+       stw     r7,-4(r6)
+       stwu    r8,-8(r6)
+       bdnz    1b
+       andi.   r5,r5,7
+2:     cmplwi  0,r5,4
+       blt     3f
+       lwzu    r0,-4(r4)               /* one more whole word */
+       subi    r5,r5,4
+       stwu    r0,-4(r6)
+3:     cmpwi   0,r5,0
+       beqlr
+       mtctr   r5
+4:     lbzu    r0,-1(r4)               /* trailing bytes */
+       stbu    r0,-1(r6)
+       bdnz    4b
+       blr
+5:     mtctr   r0                      /* byte-copy until the end is word aligned */
+6:     lbzu    r7,-1(r4)
+       stbu    r7,-1(r6)
+       bdnz    6b
+       subf    r5,r0,r5
+       rlwinm. r7,r5,32-3,3,31
+       beq     2b
+       mtctr   r7
+       b       1b
+
+_GLOBAL(__copy_tofrom_user)    /* r3=dst, r4=src, r5=len; returns # bytes NOT copied */
+       addi    r4,r4,-4
+       addi    r6,r3,-4
+       neg     r0,r3
+       andi.   r0,r0,CACHELINE_MASK    /* # bytes to start of cache line */
+       beq     58f
+
+       cmplw   0,r5,r0                 /* is this more than total to do? */
+       blt     63f                     /* if not much to do */
+       andi.   r8,r0,3                 /* get it word-aligned first */
+       mtctr   r8
+       beq+    61f
+70:    lbz     r9,4(r4)                /* do some bytes */
+71:    stb     r9,4(r6)
+       addi    r4,r4,1
+       addi    r6,r6,1
+       bdnz    70b
+61:    subf    r5,r0,r5
+       srwi.   r0,r0,2
+       mtctr   r0
+       beq     58f
+72:    lwzu    r9,4(r4)                /* do some words */
+73:    stwu    r9,4(r6)
+       bdnz    72b
+
+       .section __ex_table,"a"
+       .align  2
+       .long   70b,100f
+       .long   71b,101f
+       .long   72b,102f
+       .long   73b,103f
+       .text
+
+58:    srwi.   r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+       clrlwi  r5,r5,32-LG_CACHELINE_BYTES
+       li      r11,4
+       beq     63f
+
+#ifdef CONFIG_8xx
+       /* Don't use prefetch on 8xx */
+       mtctr   r0
+       li      r0,0
+53:    COPY_16_BYTES_WITHEX(0)
+       bdnz    53b
+
+#else /* not CONFIG_8xx */
+       /* Here we decide how far ahead to prefetch the source */
+       li      r3,4
+       cmpwi   r0,1
+       li      r7,0
+       ble     114f
+       li      r7,1
+#if MAX_COPY_PREFETCH > 1
+       /* Heuristically, for large transfers we prefetch
+          MAX_COPY_PREFETCH cachelines ahead.  For small transfers
+          we prefetch 1 cacheline ahead. */
+       cmpwi   r0,MAX_COPY_PREFETCH
+       ble     112f
+       li      r7,MAX_COPY_PREFETCH
+112:   mtctr   r7
+111:   dcbt    r3,r4                   /* warm up r7 lines of source */
+       addi    r3,r3,CACHELINE_BYTES
+       bdnz    111b
+#else
+       dcbt    r3,r4
+       addi    r3,r3,CACHELINE_BYTES
+#endif /* MAX_COPY_PREFETCH > 1 */
+
+114:   subf    r8,r7,r0                /* r8 = lines to copy in the main loop */
+       mr      r0,r7
+       mtctr   r8
+
+53:    dcbt    r3,r4
+54:    dcbz    r11,r6                  /* zero dest line before filling it */
+       .section __ex_table,"a"
+       .align  2
+       .long   54b,105f
+       .text
+/* the main body of the cacheline loop */
+       COPY_16_BYTES_WITHEX(0)
+#if L1_CACHE_BYTES >= 32
+       COPY_16_BYTES_WITHEX(1)
+#if L1_CACHE_BYTES >= 64
+       COPY_16_BYTES_WITHEX(2)
+       COPY_16_BYTES_WITHEX(3)
+#if L1_CACHE_BYTES >= 128
+       COPY_16_BYTES_WITHEX(4)
+       COPY_16_BYTES_WITHEX(5)
+       COPY_16_BYTES_WITHEX(6)
+       COPY_16_BYTES_WITHEX(7)
+#endif
+#endif
+#endif
+       bdnz    53b
+       cmpwi   r0,0
+       li      r3,4
+       li      r7,0
+       bne     114b                    /* copy the lines we only prefetched */
+#endif /* CONFIG_8xx */
+
+63:    srwi.   r0,r5,2                 /* remaining whole words */
+       mtctr   r0
+       beq     64f
+30:    lwzu    r0,4(r4)
+31:    stwu    r0,4(r6)
+       bdnz    30b
+
+64:    andi.   r0,r5,3                 /* remaining bytes */
+       mtctr   r0
+       beq+    65f
+40:    lbz     r0,4(r4)
+41:    stb     r0,4(r6)
+       addi    r4,r4,1
+       addi    r6,r6,1
+       bdnz    40b
+65:    li      r3,0                    /* everything copied */
+       blr
+
+/* read fault, initial single-byte copy */
+100:   li      r9,0
+       b       90f
+/* write fault, initial single-byte copy */
+101:   li      r9,1
+90:    subf    r5,r8,r5
+       li      r3,0
+       b       99f
+/* read fault, initial word copy */
+102:   li      r9,0
+       b       91f
+/* write fault, initial word copy */
+103:   li      r9,1
+91:    li      r3,2
+       b       99f
+
+/*
+ * this stuff handles faults in the cacheline loop and branches to either
+ * 104f (if in read part) or 105f (if in write part), after updating r5
+ */
+       COPY_16_BYTES_EXCODE(0)
+#if L1_CACHE_BYTES >= 32
+       COPY_16_BYTES_EXCODE(1)
+#if L1_CACHE_BYTES >= 64
+       COPY_16_BYTES_EXCODE(2)
+       COPY_16_BYTES_EXCODE(3)
+#if L1_CACHE_BYTES >= 128
+       COPY_16_BYTES_EXCODE(4)
+       COPY_16_BYTES_EXCODE(5)
+       COPY_16_BYTES_EXCODE(6)
+       COPY_16_BYTES_EXCODE(7)
+#endif
+#endif
+#endif
+
+/* read fault in cacheline loop */
+104:   li      r9,0
+       b       92f
+/* fault on dcbz (effectively a write fault) */
+/* or write fault in cacheline loop */
+105:   li      r9,1
+92:    li      r3,LG_CACHELINE_BYTES
+       mfctr   r8
+       add     r0,r0,r8
+       b       106f
+/* read fault in final word loop */
+108:   li      r9,0
+       b       93f
+/* write fault in final word loop */
+109:   li      r9,1
+93:    andi.   r5,r5,3
+       li      r3,2
+       b       99f
+/* read fault in final byte loop */
+110:   li      r9,0
+       b       94f
+/* write fault in final byte loop */
+111:   li      r9,1
+94:    li      r5,0
+       li      r3,0
+/*
+ * At this stage the number of bytes not copied is
+ * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
+ */
+99:    mfctr   r0
+106:   slw     r3,r0,r3
+       add.    r3,r3,r5
+       beq     120f                    /* shouldn't happen */
+       cmpwi   0,r9,0
+       bne     120f
+/* for a read fault, first try to continue the copy one byte at a time */
+       mtctr   r3
+130:   lbz     r0,4(r4)
+131:   stb     r0,4(r6)
+       addi    r4,r4,1
+       addi    r6,r6,1
+       bdnz    130b
+/* then clear out the destination: r3 bytes starting at 4(r6) */
+132:   mfctr   r3
+       srwi.   r0,r3,2
+       li      r9,0
+       mtctr   r0
+       beq     113f
+112:   stwu    r9,4(r6)
+       bdnz    112b
+113:   andi.   r0,r3,3
+       mtctr   r0
+       beq     120f
+114:   stb     r9,4(r6)
+       addi    r6,r6,1
+       bdnz    114b
+120:   blr                             /* r3 = # bytes not copied */
+
+       .section __ex_table,"a"
+       .align  2
+       .long   30b,108b
+       .long   31b,109b
+       .long   40b,110b
+       .long   41b,111b
+       .long   130b,132b
+       .long   131b,120b
+       .long   112b,120b
+       .long   114b,120b
+       .text
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
new file mode 100644 (file)
index 0000000..733d616
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * arch/ppc64/lib/copypage.S
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+_GLOBAL(copy_page)     /* copy_page(r3=to, r4=from): copy one 4096-byte page */
+       std     r31,-8(1)       /* save non-volatile r20-r31 below the stack pointer */
+       std     r30,-16(1)
+       std     r29,-24(1)
+       std     r28,-32(1)
+       std     r27,-40(1)
+       std     r26,-48(1)
+       std     r25,-56(1)
+       std     r24,-64(1)
+       std     r23,-72(1)
+       std     r22,-80(1)
+       std     r21,-88(1)
+       std     r20,-96(1)
+       li      r5,4096/32 - 1  /* page handled in 32-byte groups */
+       addi    r3,r3,-8        /* pre-bias dst for the 8(3)... stores */
+       li      r12,5           /* inner loop count (see mtctr below) */
+0:     addi    r5,r5,-24
+       mtctr   r12
+       ld      r22,640(4)      /* prime the software pipeline: loads spread */
+       ld      r21,512(4)      /* across several 128-byte-apart streams */
+       ld      r20,384(4)
+       ld      r11,256(4)
+       ld      r9,128(4)
+       ld      r7,0(4)
+       ld      r25,648(4)
+       ld      r24,520(4)
+       ld      r23,392(4)
+       ld      r10,264(4)
+       ld      r8,136(4)
+       ldu     r6,8(4)
+       cmpwi   r5,24
+1:     std     r22,648(3)      /* main pipelined loop: store the previous */
+       std     r21,520(3)      /* group while loading the next */
+       std     r20,392(3)
+       std     r11,264(3)
+       std     r9,136(3)
+       std     r7,8(3)
+       ld      r28,648(4)
+       ld      r27,520(4)
+       ld      r26,392(4)
+       ld      r31,264(4)
+       ld      r30,136(4)
+       ld      r29,8(4)
+       std     r25,656(3)
+       std     r24,528(3)
+       std     r23,400(3)
+       std     r10,272(3)
+       std     r8,144(3)
+       std     r6,16(3)
+       ld      r22,656(4)
+       ld      r21,528(4)
+       ld      r20,400(4)
+       ld      r11,272(4)
+       ld      r9,144(4)
+       ld      r7,16(4)
+       std     r28,664(3)
+       std     r27,536(3)
+       std     r26,408(3)
+       std     r31,280(3)
+       std     r30,152(3)
+       stdu    r29,24(3)
+       ld      r25,664(4)
+       ld      r24,536(4)
+       ld      r23,408(4)
+       ld      r10,280(4)
+       ld      r8,152(4)
+       ldu     r6,24(4)
+       bdnz    1b
+       std     r22,648(3)      /* drain the pipeline */
+       std     r21,520(3)
+       std     r20,392(3)
+       std     r11,264(3)
+       std     r9,136(3)
+       std     r7,8(3)
+       addi    r4,r4,640
+       addi    r3,r3,648
+       bge     0b
+       mtctr   r5              /* copy the remaining 32-byte chunks */
+       ld      r7,0(4)
+       ld      r8,8(4)
+       ldu     r9,16(4)
+3:     ld      r10,8(4)
+       std     r7,8(3)
+       ld      r7,16(4)
+       std     r8,16(3)
+       ld      r8,24(4)
+       std     r9,24(3)
+       ldu     r9,32(4)
+       stdu    r10,32(3)
+       bdnz    3b
+4:     ld      r10,8(4)        /* final chunk */
+       std     r7,8(3)
+       std     r8,16(3)
+       std     r9,24(3)
+       std     r10,32(3)
+9:     ld      r20,-96(1)      /* restore the saved registers */
+       ld      r21,-88(1)
+       ld      r22,-80(1)
+       ld      r23,-72(1)
+       ld      r24,-64(1)
+       ld      r25,-56(1)
+       ld      r26,-48(1)
+       ld      r27,-40(1)
+       ld      r28,-32(1)
+       ld      r29,-24(1)
+       ld      r30,-16(1)
+       ld      r31,-8(1)
+       blr
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
new file mode 100644 (file)
index 0000000..a0b3fbb
--- /dev/null
@@ -0,0 +1,576 @@
+/*
+ * arch/ppc64/lib/copyuser.S
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+       .align  7
+_GLOBAL(__copy_tofrom_user)    /* r3=dst, r4=src, r5=len; returns # bytes NOT copied */
+       /* first check for a whole page copy on a page boundary */
+       cmpldi  cr1,r5,16
+       cmpdi   cr6,r5,4096
+       or      r0,r3,r4
+       neg     r6,r3           /* LS 3 bits = # bytes to 8-byte dest bdry */
+       andi.   r0,r0,4095
+       std     r3,-24(r1)      /* stash dst for the fault handlers */
+       crand   cr0*4+2,cr0*4+2,cr6*4+2
+       std     r4,-16(r1)      /* stash src */
+       std     r5,-8(r1)       /* stash len */
+       dcbt    0,r4
+       beq     .Lcopy_page
+       andi.   r6,r6,7
+       mtcrf   0x01,r5         /* low 4 bits of len -> cr7 */
+       blt     cr1,.Lshort_copy
+       bne     .Ldst_unaligned
+.Ldst_aligned:
+       andi.   r0,r4,7
+       addi    r3,r3,-16
+       bne     .Lsrc_unaligned
+       srdi    r7,r5,4         /* # of 16-byte chunks */
+20:    ld      r9,0(r4)
+       addi    r4,r4,-8
+       mtctr   r7
+       andi.   r5,r5,7
+       bf      cr7*4+0,22f
+       addi    r3,r3,8
+       addi    r4,r4,8
+       mr      r8,r9
+       blt     cr1,72f
+21:    ld      r9,8(r4)
+70:    std     r8,8(r3)
+22:    ldu     r8,16(r4)
+71:    stdu    r9,16(r3)
+       bdnz    21b
+72:    std     r8,8(r3)
+       beq+    3f
+       addi    r3,r3,16
+23:    ld      r9,8(r4)
+.Ldo_tail:
+       bf      cr7*4+1,1f      /* 4-byte remainder? */
+       rotldi  r9,r9,32
+73:    stw     r9,0(r3)
+       addi    r3,r3,4
+1:     bf      cr7*4+2,2f      /* 2-byte remainder? */
+       rotldi  r9,r9,16
+74:    sth     r9,0(r3)
+       addi    r3,r3,2
+2:     bf      cr7*4+3,3f      /* 1-byte remainder? */
+       rotldi  r9,r9,8
+75:    stb     r9,0(r3)
+3:     li      r3,0            /* everything copied */
+       blr
+
+.Lsrc_unaligned:
+       srdi    r6,r5,3
+       addi    r5,r5,-16
+       subf    r4,r0,r4
+       srdi    r7,r5,4
+       sldi    r10,r0,3        /* shift counts for realigning doublewords */
+       cmpldi  cr6,r6,3
+       andi.   r5,r5,7
+       mtctr   r7
+       subfic  r11,r10,64
+       add     r5,r5,r0
+       bt      cr7*4+0,28f
+
+24:    ld      r9,0(r4)        /* 3+2n loads, 2+2n stores */
+25:    ld      r0,8(r4)
+       sld     r6,r9,r10
+26:    ldu     r9,16(r4)
+       srd     r7,r0,r11
+       sld     r8,r0,r10
+       or      r7,r7,r6
+       blt     cr6,79f
+27:    ld      r0,8(r4)
+       b       2f
+
+28:    ld      r0,0(r4)        /* 4+2n loads, 3+2n stores */
+29:    ldu     r9,8(r4)
+       sld     r8,r0,r10
+       addi    r3,r3,-8
+       blt     cr6,5f
+30:    ld      r0,8(r4)
+       srd     r12,r9,r11
+       sld     r6,r9,r10
+31:    ldu     r9,16(r4)
+       or      r12,r8,r12
+       srd     r7,r0,r11
+       sld     r8,r0,r10
+       addi    r3,r3,16
+       beq     cr6,78f
+
+1:     or      r7,r7,r6        /* main realigning loop */
+32:    ld      r0,8(r4)
+76:    std     r12,8(r3)
+2:     srd     r12,r9,r11
+       sld     r6,r9,r10
+33:    ldu     r9,16(r4)
+       or      r12,r8,r12
+77:    stdu    r7,16(r3)
+       srd     r7,r0,r11
+       sld     r8,r0,r10
+       bdnz    1b
+
+78:    std     r12,8(r3)       /* drain the realigning pipeline */
+       or      r7,r7,r6
+79:    std     r7,16(r3)
+5:     srd     r12,r9,r11
+       or      r12,r8,r12
+80:    std     r12,24(r3)
+       bne     6f
+       li      r3,0
+       blr
+6:     cmpwi   cr1,r5,8
+       addi    r3,r3,32
+       sld     r9,r9,r10
+       ble     cr1,.Ldo_tail
+34:    ld      r0,8(r4)
+       srd     r7,r0,r11
+       or      r9,r7,r9
+       b       .Ldo_tail
+
+.Ldst_unaligned:
+       mtcrf   0x01,r6         /* put #bytes to 8B bdry into cr7 */
+       subf    r5,r6,r5
+       li      r7,0
+       cmpldi  r1,r5,16        /* NOTE(review): "r1" here presumably means cr1 — verify */
+       bf      cr7*4+3,1f
+35:    lbz     r0,0(r4)
+81:    stb     r0,0(r3)
+       addi    r7,r7,1
+1:     bf      cr7*4+2,2f
+36:    lhzx    r0,r7,r4
+82:    sthx    r0,r7,r3
+       addi    r7,r7,2
+2:     bf      cr7*4+1,3f
+37:    lwzx    r0,r7,r4
+83:    stwx    r0,r7,r3
+3:     mtcrf   0x01,r5
+       add     r4,r6,r4
+       add     r3,r6,r3
+       b       .Ldst_aligned
+
+.Lshort_copy:
+       bf      cr7*4+0,1f      /* copy len < 16 using cr7 bits of len */
+38:    lwz     r0,0(r4)
+39:    lwz     r9,4(r4)
+       addi    r4,r4,8
+84:    stw     r0,0(r3)
+85:    stw     r9,4(r3)
+       addi    r3,r3,8
+1:     bf      cr7*4+1,2f
+40:    lwz     r0,0(r4)
+       addi    r4,r4,4
+86:    stw     r0,0(r3)
+       addi    r3,r3,4
+2:     bf      cr7*4+2,3f
+41:    lhz     r0,0(r4)
+       addi    r4,r4,2
+87:    sth     r0,0(r3)
+       addi    r3,r3,2
+3:     bf      cr7*4+3,4f
+42:    lbz     r0,0(r4)
+88:    stb     r0,0(r3)
+4:     li      r3,0            /* everything copied */
+       blr
+
+/*
+ * exception handlers follow
+ * we have to return the number of bytes not copied
+ * for an exception on a load, we set the rest of the destination to 0
+ */
+
+136:                           /* per-label fixups adjust r3 to point at the */
+137:                           /* first unmodified destination byte */
+       add     r3,r3,r7
+       b       1f
+130:
+131:
+       addi    r3,r3,8
+120:
+122:
+124:
+125:
+126:
+127:
+128:
+129:
+133:
+       addi    r3,r3,8
+121:
+132:
+       addi    r3,r3,8
+123:
+134:
+135:
+138:
+139:
+140:
+141:
+142:
+
+/*
+ * here we have had a fault on a load and r3 points to the first
+ * unmodified byte of the destination
+ */
+1:     ld      r6,-24(r1)      /* reload the dst/src/len stashed at entry */
+       ld      r4,-16(r1)
+       ld      r5,-8(r1)
+       subf    r6,r6,r3
+       add     r4,r4,r6
+       subf    r5,r6,r5        /* #bytes left to go */
+
+/*
+ * first see if we can copy any more bytes before hitting another exception
+ */
+       mtctr   r5
+43:    lbz     r0,0(r4)
+       addi    r4,r4,1
+89:    stb     r0,0(r3)
+       addi    r3,r3,1
+       bdnz    43b
+       li      r3,0            /* huh? all copied successfully this time? */
+       blr
+
+/*
+ * here we have trapped again, need to clear ctr bytes starting at r3
+ */
+143:   mfctr   r5
+       li      r0,0
+       mr      r4,r3
+       mr      r3,r5           /* return the number of bytes not copied */
+1:     andi.   r9,r4,7         /* byte-zero up to a doubleword boundary */
+       beq     3f
+90:    stb     r0,0(r4)
+       addic.  r5,r5,-1
+       addi    r4,r4,1
+       bne     1b
+       blr
+3:     cmpldi  cr1,r5,8
+       srdi    r9,r5,3
+       andi.   r5,r5,7
+       blt     cr1,93f
+       mtctr   r9
+91:    std     r0,0(r4)        /* zero whole doublewords */
+       addi    r4,r4,8
+       bdnz    91b
+93:    beqlr
+       mtctr   r5      
+92:    stb     r0,0(r4)        /* zero trailing bytes */
+       addi    r4,r4,1
+       bdnz    92b
+       blr
+
+/*
+ * exception handlers for stores: we just need to work
+ * out how many bytes weren't copied
+ */
+182:
+183:
+       add     r3,r3,r7
+       b       1f
+180:
+       addi    r3,r3,8
+171:
+177:
+       addi    r3,r3,8
+170:
+172:
+176:
+178:
+       addi    r3,r3,4
+185:
+       addi    r3,r3,4
+173:
+174:
+175:
+179:
+181:
+184:
+186:
+187:
+188:
+189:
+1:
+       ld      r6,-24(r1)      /* original dst */
+       ld      r5,-8(r1)       /* original len */
+       add     r6,r6,r5
+       subf    r3,r3,r6        /* #bytes not copied */
+190:
+191:
+192:
+       blr                     /* #bytes not copied in r3 */
+
+       .section __ex_table,"a"
+       .align  3               /* doubleword entries */
+       .llong  20b,120b
+       .llong  21b,121b
+       .llong  70b,170b
+       .llong  22b,122b
+       .llong  71b,171b
+       .llong  72b,172b
+       .llong  23b,123b
+       .llong  73b,173b
+       .llong  74b,174b
+       .llong  75b,175b
+       .llong  24b,124b
+       .llong  25b,125b
+       .llong  26b,126b
+       .llong  27b,127b
+       .llong  28b,128b
+       .llong  29b,129b
+       .llong  30b,130b
+       .llong  31b,131b
+       .llong  32b,132b
+       .llong  76b,176b
+       .llong  33b,133b
+       .llong  77b,177b
+       .llong  78b,178b
+       .llong  79b,179b
+       .llong  80b,180b
+       .llong  34b,134b
+       .llong  35b,135b
+       .llong  81b,181b
+       .llong  36b,136b
+       .llong  82b,182b
+       .llong  37b,137b
+       .llong  83b,183b
+       .llong  38b,138b
+       .llong  39b,139b
+       .llong  84b,184b
+       .llong  85b,185b
+       .llong  40b,140b
+       .llong  86b,186b
+       .llong  41b,141b
+       .llong  87b,187b
+       .llong  42b,142b
+       .llong  88b,188b
+       .llong  43b,143b
+       .llong  89b,189b
+       .llong  90b,190b
+       .llong  91b,191b
+       .llong  92b,192b
+       
+       .text
+
+/*
+ * Routine to copy a whole page of data, optimized for POWER4.
+ * On POWER4 it is more than 50% faster than the simple loop
+ * above (following the .Ldst_aligned label) but it runs slightly
+ * slower on POWER3.
+ */
+.Lcopy_page:                           # copy one 4kB page, tuned for POWER4
+       std     r31,-32(1)              # save non-volatile GPRs r20-r31
+       std     r30,-40(1)
+       std     r29,-48(1)
+       std     r28,-56(1)
+       std     r27,-64(1)
+       std     r26,-72(1)
+       std     r25,-80(1)
+       std     r24,-88(1)
+       std     r23,-96(1)
+       std     r22,-104(1)
+       std     r21,-112(1)
+       std     r20,-120(1)
+       li      r5,4096/32 - 1          # number of 32-byte chunks, less one
+       addi    r3,r3,-8                # pre-bias dest for 8(3)-style stores
+       li      r0,5                    # inner-loop trip count
+0:     addi    r5,r5,-24
+       mtctr   r0
+20:    ld      r22,640(4)
+21:    ld      r21,512(4)
+22:    ld      r20,384(4)
+23:    ld      r11,256(4)
+24:    ld      r9,128(4)
+25:    ld      r7,0(4)
+26:    ld      r25,648(4)
+27:    ld      r24,520(4)
+28:    ld      r23,392(4)
+29:    ld      r10,264(4)
+30:    ld      r8,136(4)
+31:    ldu     r6,8(4)
+       cmpwi   r5,24
+1:
+32:    std     r22,648(3)
+33:    std     r21,520(3)
+34:    std     r20,392(3)
+35:    std     r11,264(3)
+36:    std     r9,136(3)
+37:    std     r7,8(3)
+38:    ld      r28,648(4)
+39:    ld      r27,520(4)
+40:    ld      r26,392(4)
+41:    ld      r31,264(4)
+42:    ld      r30,136(4)
+43:    ld      r29,8(4)
+44:    std     r25,656(3)
+45:    std     r24,528(3)
+46:    std     r23,400(3)
+47:    std     r10,272(3)
+48:    std     r8,144(3)
+49:    std     r6,16(3)
+50:    ld      r22,656(4)
+51:    ld      r21,528(4)
+52:    ld      r20,400(4)
+53:    ld      r11,272(4)
+54:    ld      r9,144(4)
+55:    ld      r7,16(4)
+56:    std     r28,664(3)
+57:    std     r27,536(3)
+58:    std     r26,408(3)
+59:    std     r31,280(3)
+60:    std     r30,152(3)
+61:    stdu    r29,24(3)
+62:    ld      r25,664(4)
+63:    ld      r24,536(4)
+64:    ld      r23,408(4)
+65:    ld      r10,280(4)
+66:    ld      r8,152(4)
+67:    ldu     r6,24(4)
+       bdnz    1b
+68:    std     r22,648(3)
+69:    std     r21,520(3)
+70:    std     r20,392(3)
+71:    std     r11,264(3)
+72:    std     r9,136(3)
+73:    std     r7,8(3)
+74:    addi    r4,r4,640
+75:    addi    r3,r3,648
+       bge     0b
+       mtctr   r5                      # set up count for the tail loop
+76:    ld      r7,0(4)
+77:    ld      r8,8(4)
+78:    ldu     r9,16(4)
+3:
+79:    ld      r10,8(4)
+80:    std     r7,8(3)
+81:    ld      r7,16(4)
+82:    std     r8,16(3)
+83:    ld      r8,24(4)
+84:    std     r9,24(3)
+85:    ldu     r9,32(4)
+86:    stdu    r10,32(3)
+       bdnz    3b
+4:
+87:    ld      r10,8(4)
+88:    std     r7,8(3)
+89:    std     r8,16(3)
+90:    std     r9,24(3)
+91:    std     r10,32(3)
+9:     ld      r20,-120(1)             # restore non-volatile GPRs
+       ld      r21,-112(1)
+       ld      r22,-104(1)
+       ld      r23,-96(1)
+       ld      r24,-88(1)
+       ld      r25,-80(1)
+       ld      r26,-72(1)
+       ld      r27,-64(1)
+       ld      r28,-56(1)
+       ld      r29,-48(1)
+       ld      r30,-40(1)
+       ld      r31,-32(1)
+       li      r3,0                    # return 0: nothing left uncopied
+       blr
+
+/*
+ * on an exception, reset to the beginning and jump back into the
+ * standard __copy_tofrom_user
+ */
+100:   ld      r20,-120(1)             # restore non-volatile GPRs first
+       ld      r21,-112(1)
+       ld      r22,-104(1)
+       ld      r23,-96(1)
+       ld      r24,-88(1)
+       ld      r25,-80(1)
+       ld      r26,-72(1)
+       ld      r27,-64(1)
+       ld      r28,-56(1)
+       ld      r29,-48(1)
+       ld      r30,-40(1)
+       ld      r31,-32(1)
+       ld      r3,-24(r1)              # reload dest/src saved at entry (see load handlers above)
+       ld      r4,-16(r1)
+       li      r5,4096
+       b       .Ldst_aligned
+
+       .section __ex_table,"a"
+       .align  3
+       .llong  20b,100b
+       .llong  21b,100b
+       .llong  22b,100b
+       .llong  23b,100b
+       .llong  24b,100b
+       .llong  25b,100b
+       .llong  26b,100b
+       .llong  27b,100b
+       .llong  28b,100b
+       .llong  29b,100b
+       .llong  30b,100b
+       .llong  31b,100b
+       .llong  32b,100b
+       .llong  33b,100b
+       .llong  34b,100b
+       .llong  35b,100b
+       .llong  36b,100b
+       .llong  37b,100b
+       .llong  38b,100b
+       .llong  39b,100b
+       .llong  40b,100b
+       .llong  41b,100b
+       .llong  42b,100b
+       .llong  43b,100b
+       .llong  44b,100b
+       .llong  45b,100b
+       .llong  46b,100b
+       .llong  47b,100b
+       .llong  48b,100b
+       .llong  49b,100b
+       .llong  50b,100b
+       .llong  51b,100b
+       .llong  52b,100b
+       .llong  53b,100b
+       .llong  54b,100b
+       .llong  55b,100b
+       .llong  56b,100b
+       .llong  57b,100b
+       .llong  58b,100b
+       .llong  59b,100b
+       .llong  60b,100b
+       .llong  61b,100b
+       .llong  62b,100b
+       .llong  63b,100b
+       .llong  64b,100b
+       .llong  65b,100b
+       .llong  66b,100b
+       .llong  67b,100b
+       .llong  68b,100b
+       .llong  69b,100b
+       .llong  70b,100b
+       .llong  71b,100b
+       .llong  72b,100b
+       .llong  73b,100b
+       .llong  74b,100b
+       .llong  75b,100b
+       .llong  76b,100b
+       .llong  77b,100b
+       .llong  78b,100b
+       .llong  79b,100b
+       .llong  80b,100b
+       .llong  81b,100b
+       .llong  82b,100b
+       .llong  83b,100b
+       .llong  84b,100b
+       .llong  85b,100b
+       .llong  86b,100b
+       .llong  87b,100b
+       .llong  88b,100b
+       .llong  89b,100b
+       .llong  90b,100b
+       .llong  91b,100b
diff --git a/arch/powerpc/lib/div64.S b/arch/powerpc/lib/div64.S
new file mode 100644 (file)
index 0000000..83d9832
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Divide a 64-bit unsigned number by a 32-bit unsigned number.
+ * This routine assumes that the top 32 bits of the dividend are
+ * non-zero to start with.
+ * On entry, r3 points to the dividend, which get overwritten with
+ * the 64-bit quotient, and r4 contains the divisor.
+ * On exit, r3 contains the remainder.
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+_GLOBAL(__div64_32)
+       lwz     r5,0(r3)        # get the dividend into r5/r6
+       lwz     r6,4(r3)
+       cmplw   r5,r4
+       li      r7,0            # r7:r8 accumulate the 64-bit quotient
+       li      r8,0
+       blr     1f
+       divwu   r7,r5,r4        # if dividend.hi >= divisor,
+       mullw   r0,r7,r4        # quotient.hi = dividend.hi / divisor
+       subf.   r5,r0,r5        # dividend.hi %= divisor
+       beq     3f
+1:     mr      r11,r5          # here dividend.hi != 0
+       andis.  r0,r5,0xc000
+       bne     2f
+       cntlzw  r0,r5           # we are shifting the dividend right
+       li      r10,-1          # to make it < 2^32, and shifting
+       srw     r10,r10,r0      # the divisor right the same amount,
+       addc    r9,r4,r10       # rounding up (so the estimate cannot
+       andc    r11,r6,r10      # ever be too large, only too small)
+       andc    r9,r9,r10
+       addze   r9,r9
+       or      r11,r5,r11
+       rotlw   r9,r9,r0
+       rotlw   r11,r11,r0
+       divwu   r11,r11,r9      # then we divide the shifted quantities
+2:     mullw   r10,r11,r4      # to get an estimate of the quotient,
+       mulhwu  r9,r11,r4       # multiply the estimate by the divisor,
+       subfc   r6,r10,r6       # take the product from the divisor,
+       add     r8,r8,r11       # and add the estimate to the accumulated
+       subfe.  r5,r9,r5        # quotient
+       bne     1b
+3:     cmplw   r6,r4
+       blt     4f
+       divwu   r0,r6,r4        # perform the remaining 32-bit division
+       mullw   r10,r0,r4       # and get the remainder
+       add     r8,r8,r0
+       subf    r6,r10,r6
+4:     stw     r7,0(r3)        # return the quotient in *r3
+       stw     r8,4(r3)
+       mr      r3,r6           # return the remainder in r3
+       blr
diff --git a/arch/powerpc/lib/e2a.c b/arch/powerpc/lib/e2a.c
new file mode 100644 (file)
index 0000000..d2b8348
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ *  arch/ppc64/lib/e2a.c
+ *
+ *  EBCDIC to ASCII conversion
+ *
+ * This function moved here from arch/ppc64/kernel/viopath.c
+ *
+ * (C) Copyright 2000-2004 IBM Corporation
+ *
+ * This program is free software;  you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+
+unsigned char e2a(unsigned char x)
+{
+       switch (x) {            /* map EBCDIC digits and letters to ASCII */
+       case 0xF0:              /* 0xF0-0xF9: digits '0'-'9' */
+               return '0';
+       case 0xF1:
+               return '1';
+       case 0xF2:
+               return '2';
+       case 0xF3:
+               return '3';
+       case 0xF4:
+               return '4';
+       case 0xF5:
+               return '5';
+       case 0xF6:
+               return '6';
+       case 0xF7:
+               return '7';
+       case 0xF8:
+               return '8';
+       case 0xF9:
+               return '9';
+       case 0xC1:              /* 0xC1-0xC9: letters 'A'-'I' */
+               return 'A';
+       case 0xC2:
+               return 'B';
+       case 0xC3:
+               return 'C';
+       case 0xC4:
+               return 'D';
+       case 0xC5:
+               return 'E';
+       case 0xC6:
+               return 'F';
+       case 0xC7:
+               return 'G';
+       case 0xC8:
+               return 'H';
+       case 0xC9:
+               return 'I';
+       case 0xD1:              /* 0xD1-0xD9: letters 'J'-'R' */
+               return 'J';
+       case 0xD2:
+               return 'K';
+       case 0xD3:
+               return 'L';
+       case 0xD4:
+               return 'M';
+       case 0xD5:
+               return 'N';
+       case 0xD6:
+               return 'O';
+       case 0xD7:
+               return 'P';
+       case 0xD8:
+               return 'Q';
+       case 0xD9:
+               return 'R';
+       case 0xE2:              /* 0xE2-0xE9: letters 'S'-'Z' */
+               return 'S';
+       case 0xE3:
+               return 'T';
+       case 0xE4:
+               return 'U';
+       case 0xE5:
+               return 'V';
+       case 0xE6:
+               return 'W';
+       case 0xE7:
+               return 'X';
+       case 0xE8:
+               return 'Y';
+       case 0xE9:
+               return 'Z';
+       }
+       return ' ';             /* anything unrecognized maps to a space */
+}
+EXPORT_SYMBOL(e2a);
+
+
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
new file mode 100644 (file)
index 0000000..4b8c5ad
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Spin and read/write lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ *   Rework to support virtual processors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+
+/* waiting for a spinlock... */
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#include <asm/hvcall.h>
+#include <asm/iSeries/HvCall.h>
+
+void __spin_yield(raw_spinlock_t *lock)
+{
+       unsigned int lock_value, holder_cpu, yield_count;
+       struct paca_struct *holder_paca;
+
+       lock_value = lock->slock;
+       if (lock_value == 0)
+               return;         /* lock has been freed; nothing to yield to */
+       holder_cpu = lock_value & 0xffff;       /* low 16 bits encode the holder cpu */
+       BUG_ON(holder_cpu >= NR_CPUS);
+       holder_paca = &paca[holder_cpu];
+       yield_count = holder_paca->lppaca.yield_count;
+       if ((yield_count & 1) == 0)
+               return;         /* virtual cpu is currently running */
+       rmb();                  /* re-check the lock only after reading yield_count */
+       if (lock->slock != lock_value)
+               return;         /* something has changed */
+#ifdef CONFIG_PPC_ISERIES
+       HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+               ((u64)holder_cpu << 32) | yield_count);
+#else
+       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
+                          yield_count);
+#endif
+}
+
+/*
+ * Waiting for a read lock or a write lock on a rwlock...
+ * This turns out to be the same for read and write locks, since
+ * we only know the holder if it is write-locked.
+ */
+void __rw_yield(raw_rwlock_t *rw)
+{
+       int lock_value;
+       unsigned int holder_cpu, yield_count;
+       struct paca_struct *holder_paca;
+
+       lock_value = rw->lock;          /* negative value => write-locked */
+       if (lock_value >= 0)
+               return;         /* no write lock at present */
+       holder_cpu = lock_value & 0xffff;       /* low 16 bits encode the holder cpu */
+       BUG_ON(holder_cpu >= NR_CPUS);
+       holder_paca = &paca[holder_cpu];
+       yield_count = holder_paca->lppaca.yield_count;
+       if ((yield_count & 1) == 0)
+               return;         /* virtual cpu is currently running */
+       rmb();                  /* re-check the lock only after reading yield_count */
+       if (rw->lock != lock_value)
+               return;         /* something has changed */
+#ifdef CONFIG_PPC_ISERIES
+       HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+               ((u64)holder_cpu << 32) | yield_count);
+#else
+       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
+                          yield_count);
+#endif
+}
+#endif
+
+void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+       while (lock->slock) {           /* spin until the lock is released */
+               HMT_low();              /* drop SMT thread priority while busy-waiting */
+               if (SHARED_PROCESSOR)
+                       __spin_yield(lock);     /* on shared-processor LPARs, confer to the holder */
+       }
+       HMT_medium();                   /* restore normal thread priority */
+}
+
+EXPORT_SYMBOL(__raw_spin_unlock_wait);
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
new file mode 100644 (file)
index 0000000..68df202
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * String handling functions for PowerPC.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+_GLOBAL(memset)                                /* memset(r3 = s, r4 = c, r5 = n) */
+       neg     r0,r3
+       rlwimi  r4,r4,8,16,23           /* replicate byte into halfword */
+       andi.   r0,r0,7                 /* # bytes to be 8-byte aligned */
+       rlwimi  r4,r4,16,0,15           /* replicate halfword into word */
+       cmplw   cr1,r5,r0               /* do we get that far? */
+       rldimi  r4,r4,32,0              /* replicate word into doubleword */
+       mtcrf   1,r0
+       mr      r6,r3                   /* keep r3 intact as the return value */
+       blt     cr1,8f
+       beq+    3f                      /* if already 8-byte aligned */
+       subf    r5,r0,r5
+       bf      31,1f
+       stb     r4,0(r6)
+       addi    r6,r6,1
+1:     bf      30,2f
+       sth     r4,0(r6)
+       addi    r6,r6,2
+2:     bf      29,3f
+       stw     r4,0(r6)
+       addi    r6,r6,4
+3:     srdi.   r0,r5,6                 /* # of 64-byte chunks */
+       clrldi  r5,r5,58
+       mtctr   r0
+       beq     5f
+4:     std     r4,0(r6)
+       std     r4,8(r6)
+       std     r4,16(r6)
+       std     r4,24(r6)
+       std     r4,32(r6)
+       std     r4,40(r6)
+       std     r4,48(r6)
+       std     r4,56(r6)
+       addi    r6,r6,64
+       bdnz    4b
+5:     srwi.   r0,r5,3                 /* # of remaining 8-byte dwords */
+       clrlwi  r5,r5,29
+       mtcrf   1,r0
+       beq     8f
+       bf      29,6f
+       std     r4,0(r6)
+       std     r4,8(r6)
+       std     r4,16(r6)
+       std     r4,24(r6)
+       addi    r6,r6,32
+6:     bf      30,7f
+       std     r4,0(r6)
+       std     r4,8(r6)
+       addi    r6,r6,16
+7:     bf      31,8f
+       std     r4,0(r6)
+       addi    r6,r6,8
+8:     cmpwi   r5,0                    /* at most 7 trailing bytes left */
+       mtcrf   1,r5
+       beqlr+
+       bf      29,9f
+       stw     r4,0(r6)
+       addi    r6,r6,4
+9:     bf      30,10f
+       sth     r4,0(r6)
+       addi    r6,r6,2
+10:    bflr    31
+       stb     r4,0(r6)
+       blr
+
+_GLOBAL(memmove)
+       cmplw   0,r3,r4                 /* dest above src: regions may overlap, */
+       bgt     .backwards_memcpy       /* so copy from the end backwards */
+       b       .memcpy
+
+_GLOBAL(backwards_memcpy)
+       rlwinm. r7,r5,32-3,3,31         /* r7 = r5 >> 3 (# of 8-byte chunks) */
+       add     r6,r3,r5                /* start at the ends of both buffers */
+       add     r4,r4,r5
+       beq     2f
+       andi.   r0,r6,3                 /* is the dest end word-aligned? */
+       mtctr   r7
+       bne     5f
+1:     lwz     r7,-4(r4)
+       lwzu    r8,-8(r4)
+       stw     r7,-4(r6)
+       stwu    r8,-8(r6)
+       bdnz    1b
+       andi.   r5,r5,7                 /* leftover bytes after the 8-byte loop */
+2:     cmplwi  0,r5,4
+       blt     3f
+       lwzu    r0,-4(r4)
+       subi    r5,r5,4
+       stwu    r0,-4(r6)
+3:     cmpwi   0,r5,0
+       beqlr
+       mtctr   r5
+4:     lbzu    r0,-1(r4)
+       stbu    r0,-1(r6)
+       bdnz    4b
+       blr
+5:     mtctr   r0                      /* copy bytes until the dest end is word-aligned */
+6:     lbzu    r7,-1(r4)
+       stbu    r7,-1(r6)
+       bdnz    6b
+       subf    r5,r0,r5
+       rlwinm. r7,r5,32-3,3,31
+       beq     2b
+       mtctr   r7
+       b       1b
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
new file mode 100644 (file)
index 0000000..9ccacdf
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * arch/ppc64/lib/memcpy.S
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+       .align  7
+_GLOBAL(memcpy)                        # memcpy(r3 = dest, r4 = src, r5 = n)
+       mtcrf   0x01,r5
+       cmpldi  cr1,r5,16
+       neg     r6,r3           # LS 3 bits = # bytes to 8-byte dest bdry
+       andi.   r6,r6,7
+       dcbt    0,r4
+       blt     cr1,.Lshort_copy
+       bne     .Ldst_unaligned
+.Ldst_aligned:                         # dest is 8-byte aligned
+       andi.   r0,r4,7
+       addi    r3,r3,-16
+       bne     .Lsrc_unaligned
+       srdi    r7,r5,4
+       ld      r9,0(r4)
+       addi    r4,r4,-8
+       mtctr   r7
+       andi.   r5,r5,7
+       bf      cr7*4+0,2f
+       addi    r3,r3,8
+       addi    r4,r4,8
+       mr      r8,r9
+       blt     cr1,3f
+1:     ld      r9,8(r4)
+       std     r8,8(r3)
+2:     ldu     r8,16(r4)
+       stdu    r9,16(r3)
+       bdnz    1b
+3:     std     r8,8(r3)
+       beqlr
+       addi    r3,r3,16
+       ld      r9,8(r4)
+.Ldo_tail:                             # store the last 1-7 bytes from r9
+       bf      cr7*4+1,1f
+       rotldi  r9,r9,32
+       stw     r9,0(r3)
+       addi    r3,r3,4
+1:     bf      cr7*4+2,2f
+       rotldi  r9,r9,16
+       sth     r9,0(r3)
+       addi    r3,r3,2
+2:     bf      cr7*4+3,3f
+       rotldi  r9,r9,8
+       stb     r9,0(r3)
+3:     blr
+
+.Lsrc_unaligned:                       # shift-and-merge doublewords from src
+       srdi    r6,r5,3
+       addi    r5,r5,-16
+       subf    r4,r0,r4
+       srdi    r7,r5,4
+       sldi    r10,r0,3
+       cmpdi   cr6,r6,3
+       andi.   r5,r5,7
+       mtctr   r7
+       subfic  r11,r10,64
+       add     r5,r5,r0
+
+       bt      cr7*4+0,0f
+
+       ld      r9,0(r4)        # 3+2n loads, 2+2n stores
+       ld      r0,8(r4)
+       sld     r6,r9,r10
+       ldu     r9,16(r4)
+       srd     r7,r0,r11
+       sld     r8,r0,r10
+       or      r7,r7,r6
+       blt     cr6,4f
+       ld      r0,8(r4)
+       # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
+       b       2f
+
+0:     ld      r0,0(r4)        # 4+2n loads, 3+2n stores
+       ldu     r9,8(r4)
+       sld     r8,r0,r10
+       addi    r3,r3,-8
+       blt     cr6,5f
+       ld      r0,8(r4)
+       srd     r12,r9,r11
+       sld     r6,r9,r10
+       ldu     r9,16(r4)
+       or      r12,r8,r12
+       srd     r7,r0,r11
+       sld     r8,r0,r10
+       addi    r3,r3,16
+       beq     cr6,3f
+
+       # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
+1:     or      r7,r7,r6
+       ld      r0,8(r4)
+       std     r12,8(r3)
+2:     srd     r12,r9,r11
+       sld     r6,r9,r10
+       ldu     r9,16(r4)
+       or      r12,r8,r12
+       stdu    r7,16(r3)
+       srd     r7,r0,r11
+       sld     r8,r0,r10
+       bdnz    1b
+
+3:     std     r12,8(r3)
+       or      r7,r7,r6
+4:     std     r7,16(r3)
+5:     srd     r12,r9,r11
+       or      r12,r8,r12
+       std     r12,24(r3)
+       beqlr
+       cmpwi   cr1,r5,8
+       addi    r3,r3,32
+       sld     r9,r9,r10
+       ble     cr1,.Ldo_tail
+       ld      r0,8(r4)
+       srd     r7,r0,r11
+       or      r9,r7,r9
+       b       .Ldo_tail
+
+.Ldst_unaligned:
+       mtcrf   0x01,r6         # put #bytes to 8B bdry into cr7
+       subf    r5,r6,r5
+       li      r7,0
+       cmpldi  cr1,r5,16       # crfield operand is cr1 (was "r1")
+       bf      cr7*4+3,1f
+       lbz     r0,0(r4)
+       stb     r0,0(r3)
+       addi    r7,r7,1
+1:     bf      cr7*4+2,2f
+       lhzx    r0,r7,r4
+       sthx    r0,r7,r3
+       addi    r7,r7,2
+2:     bf      cr7*4+1,3f
+       lwzx    r0,r7,r4
+       stwx    r0,r7,r3
+3:     mtcrf   0x01,r5
+       add     r4,r6,r4
+       add     r3,r6,r3
+       b       .Ldst_aligned
+
+.Lshort_copy:                          # fewer than 16 bytes
+       bf      cr7*4+0,1f
+       lwz     r0,0(r4)
+       lwz     r9,4(r4)
+       addi    r4,r4,8
+       stw     r0,0(r3)
+       stw     r9,4(r3)
+       addi    r3,r3,8
+1:     bf      cr7*4+1,2f
+       lwz     r0,0(r4)
+       addi    r4,r4,4
+       stw     r0,0(r3)
+       addi    r3,r3,4
+2:     bf      cr7*4+2,3f
+       lhz     r0,0(r4)
+       addi    r4,r4,2
+       sth     r0,0(r3)
+       addi    r3,r3,2
+3:     bf      cr7*4+3,4f
+       lbz     r0,0(r4)
+       stb     r0,0(r3)
+4:     blr
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
new file mode 100644 (file)
index 0000000..42c5de2
--- /dev/null
@@ -0,0 +1,693 @@
+/*
+ * arch/ppc/syslib/rheap.c
+ *
+ * A Remote Heap.  Remote means that we don't touch the memory that the
+ * heap points to. Normal heap implementations use the memory they manage
+ * to place their list. We cannot do that because the memory we manage may
+ * have special properties, for example it is uncacheable or of different
+ * endianness.
+ *
+ * Author: Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/rheap.h>
+
+/*
+ * Fixup a list_head, needed when copying lists.  If the pointers fall
+ * between s and e, apply the delta.  This assumes that
+ * sizeof(struct list_head *) == sizeof(unsigned long *).
+ */
+static inline void fixup(unsigned long s, unsigned long e, int d,
+                        struct list_head *l)
+{
+       unsigned long *pp;
+
+       pp = (unsigned long *)&l->next; /* relocate ->next if it points into [s, e) */
+       if (*pp >= s && *pp < e)
+               *pp += d;
+
+       pp = (unsigned long *)&l->prev; /* likewise for ->prev */
+       if (*pp >= s && *pp < e)
+               *pp += d;
+}
+
+/* Grow the block-descriptor array to max_blocks entries; returns 0 or -errno */
+static int grow(rh_info_t * info, int max_blocks)
+{
+       rh_block_t *block, *blk;
+       int i, new_blocks;
+       int delta;
+       unsigned long blks, blke;
+
+       if (max_blocks <= info->max_blocks)
+               return -EINVAL;
+
+       new_blocks = max_blocks - info->max_blocks;
+
+       block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
+       if (block == NULL)
+               return -ENOMEM;
+
+       if (info->max_blocks > 0) {
+
+               /* copy old block area */
+               memcpy(block, info->block,
+                      sizeof(rh_block_t) * info->max_blocks);
+
+               delta = (char *)block - (char *)info->block;
+
+               /* and fixup list pointers */
+               blks = (unsigned long)info->block;
+               blke = (unsigned long)(info->block + info->max_blocks);
+
+               for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
+                       fixup(blks, blke, delta, &blk->list);
+
+               fixup(blks, blke, delta, &info->empty_list);
+               fixup(blks, blke, delta, &info->free_list);
+               fixup(blks, blke, delta, &info->taken_list);
+
+               /* free the old allocated memory */
+               if ((info->flags & RHIF_STATIC_BLOCK) == 0)
+                       kfree(info->block);
+       }
+
+       info->block = block;
+       info->empty_slots += new_blocks;
+       info->max_blocks = max_blocks;
+       info->flags &= ~RHIF_STATIC_BLOCK;
+
+       /* add the new slots to the empty list; max_blocks was updated above, so index from the old count */
+       for (i = 0, blk = block + info->max_blocks - new_blocks; i < new_blocks; i++, blk++)
+               list_add(&blk->list, &info->empty_list);
+
+       return 0;
+}
+
+/*
+ * Assure at least the required amount of empty slots.  If this function
+ * causes a grow in the block area then all pointers kept to the block
+ * area are invalid!
+ */
+static int assure_empty(rh_info_t * info, int slots)
+{
+       int max_blocks;
+
+       /* This function is not meant to be used to grow uncontrollably */
+       if (slots >= 4)
+               return -EINVAL;
+
+       /* Enough space */
+       if (info->empty_slots >= slots)
+               return 0;
+
+       /* Next 16 sized block */
+       max_blocks = ((info->max_blocks + slots) + 15) & ~15;   /* round up to a multiple of 16 */
+
+       return grow(info, max_blocks);
+}
+
+static rh_block_t *get_slot(rh_info_t * info)
+{
+       rh_block_t *blk;
+
+       /* If no more free slots, and failure to extend. */
+       /* XXX: You should have called assure_empty before */
+       if (info->empty_slots == 0) {
+               printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
+               return NULL;
+       }
+
+       /* Get empty slot to use */
+       blk = list_entry(info->empty_list.next, rh_block_t, list);
+       list_del_init(&blk->list);
+       info->empty_slots--;
+
+       /* Initialize */
+       blk->start = NULL;      /* caller fills in start/size/owner */
+       blk->size = 0;
+       blk->owner = NULL;
+
+       return blk;
+}
+
+static inline void release_slot(rh_info_t * info, rh_block_t * blk)
+{
+       list_add(&blk->list, &info->empty_list);        /* recycle the descriptor */
+       info->empty_slots++;
+}
+
+static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
+{
+       rh_block_t *blk;
+       rh_block_t *before;
+       rh_block_t *after;
+       rh_block_t *next;
+       int size;
+       unsigned long s, e, bs, be;
+       struct list_head *l;
+
+       /* We assume that they are aligned properly */
+       size = blkn->size;
+       s = (unsigned long)blkn->start;
+       e = s + size;
+
+       /* Find the blocks immediately before and after the given one
+        * (if any) */
+       before = NULL;
+       after = NULL;
+       next = NULL;
+
+       list_for_each(l, &info->free_list) {
+               blk = list_entry(l, rh_block_t, list);
+
+               bs = (unsigned long)blk->start;
+               be = bs + blk->size;
+
+               if (next == NULL && s >= bs)            /* candidate insertion point */
+                       next = blk;
+
+               if (be == s)                            /* free block ends where ours starts */
+                       before = blk;
+
+               if (e == bs)                            /* free block starts where ours ends */
+                       after = blk;
+
+               /* If both are not null, break now */
+               if (before != NULL && after != NULL)
+                       break;
+       }
+
+       /* Now check if they are really adjacent */
+       if (before != NULL && s != (unsigned long)before->start + before->size)
+               before = NULL;
+
+       if (after != NULL && e != (unsigned long)after->start)
+               after = NULL;
+
+       /* No coalescing; list insert and return */
+       if (before == NULL && after == NULL) {
+
+               if (next != NULL)
+                       list_add(&blkn->list, &next->list);
+               else
+                       list_add(&blkn->list, &info->free_list);
+
+               return;
+       }
+
+       /* We don't need it anymore */
+       release_slot(info, blkn);
+
+       /* Grow the before block */
+       if (before != NULL && after == NULL) {
+               before->size += size;
+               return;
+       }
+
+       /* Grow the after block backwards */
+       if (before == NULL && after != NULL) {
+               after->start = (int8_t *)after->start - size;
+               after->size += size;
+               return;
+       }
+
+       /* Grow the before block, and release the after block */
+       before->size += size + after->size;
+       list_del(&after->list);
+       release_slot(info, after);
+}
+
+static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
+{
+       rh_block_t *blk;
+       struct list_head *l;
+
+       /* Find the block immediately before the given one (if any) */
+       list_for_each(l, &info->taken_list) {
+               blk = list_entry(l, rh_block_t, list);
+               if (blk->start > blkn->start) {         /* keep the list sorted by start address */
+                       list_add_tail(&blkn->list, &blk->list);
+                       return;
+               }
+       }
+
+       list_add_tail(&blkn->list, &info->taken_list);  /* largest start: goes at the end */
+}
+
+/*
+ * Create a remote heap dynamically.  Note that no memory for the block
+ * descriptors is allocated here; it is allocated on the first allocation request.
+ */
+rh_info_t *rh_create(unsigned int alignment)
+{
+       rh_info_t *info;
+
+       /* Alignment must be a power of two */
+       if ((alignment & (alignment - 1)) != 0)
+               return ERR_PTR(-EINVAL);
+
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       if (info == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       info->alignment = alignment;
+
+       /* Initially everything as empty */
+       info->block = NULL;             /* descriptors allocated lazily by grow() */
+       info->max_blocks = 0;
+       info->empty_slots = 0;
+       info->flags = 0;
+
+       INIT_LIST_HEAD(&info->empty_list);
+       INIT_LIST_HEAD(&info->free_list);
+       INIT_LIST_HEAD(&info->taken_list);
+
+       return info;
+}
+
+/*
+ * Destroy a dynamically created remote heap.  Deallocate only if the areas
+ * are not static
+ */
+void rh_destroy(rh_info_t * info)
+{
+       if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
+               kfree(info->block);
+
+       if ((info->flags & RHIF_STATIC_INFO) == 0)
+               kfree(info);
+}
+
+/*
+ * Initialize in place a remote heap info block.  This is needed to support
+ * operation very early in the startup of the kernel, when it is not yet safe
+ * to call kmalloc.
+ */
+void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
+            rh_block_t * block)
+{
+       int i;
+       rh_block_t *blk;
+
+       /* Alignment must be a power of two */
+       if ((alignment & (alignment - 1)) != 0)
+               return;
+
+       info->alignment = alignment;
+
+       /* Initially everything is empty */
+       info->block = block;
+       info->max_blocks = max_blocks;
+       info->empty_slots = max_blocks;
+       info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
+
+       INIT_LIST_HEAD(&info->empty_list);
+       INIT_LIST_HEAD(&info->free_list);
+       INIT_LIST_HEAD(&info->taken_list);
+
+       /* Add all new blocks to the free list */
+       for (i = 0, blk = block; i < max_blocks; i++, blk++)
+               list_add(&blk->list, &info->empty_list);
+}
+
+/* Attach a free memory region, coalescing regions if adjacent */
+int rh_attach_region(rh_info_t * info, void *start, int size)
+{
+       rh_block_t *blk;
+       unsigned long s, e, m;
+       int r;
+
+       /* The region must be aligned */
+       s = (unsigned long)start;
+       e = s + size;
+       m = info->alignment - 1;
+
+       /* Round start up */
+       s = (s + m) & ~m;
+
+       /* Round end down */
+       e = e & ~m;
+
+       /* Take final values */
+       start = (void *)s;
+       size = (int)(e - s);
+
+       /* Grow the blocks, if needed */
+       r = assure_empty(info, 1);
+       if (r < 0)
+               return r;
+
+       blk = get_slot(info);
+       blk->start = start;
+       blk->size = size;
+       blk->owner = NULL;
+
+       attach_free_block(info, blk);
+
+       return 0;
+}
+
+/* Detach the given address range, splitting a free block if needed. */
+void *rh_detach_region(rh_info_t * info, void *start, int size)
+{
+       struct list_head *l;
+       rh_block_t *blk, *newblk;
+       unsigned long s, e, m, bs, be;
+
+       /* Validate size */
+       if (size <= 0)
+               return ERR_PTR(-EINVAL);
+
+       /* The region must be aligned */
+       s = (unsigned long)start;
+       e = s + size;
+       m = info->alignment - 1;
+
+       /* Round start up */
+       s = (s + m) & ~m;
+
+       /* Round end down */
+       e = e & ~m;
+
+       if (assure_empty(info, 1) < 0)
+               return ERR_PTR(-ENOMEM);
+
+       blk = NULL;
+       list_for_each(l, &info->free_list) {
+               blk = list_entry(l, rh_block_t, list);
+               /* The range must lie entirely inside one free block */
+               bs = (unsigned long)blk->start;
+               be = (unsigned long)blk->start + blk->size;
+               if (s >= bs && e <= be)
+                       break;
+               blk = NULL;
+       }
+
+       if (blk == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       /* Perfect fit */
+       if (bs == s && be == e) {
+               /* Delete from free list, release slot */
+               list_del(&blk->list);
+               release_slot(info, blk);
+               return (void *)s;
+       }
+
+       /* blk still in free list, with updated start and/or size */
+       if (bs == s || be == e) {
+               if (bs == s)
+                       blk->start = (int8_t *)blk->start + size;
+               blk->size -= size;
+
+       } else {
+               /* The front free fragment */
+               blk->size = s - bs;
+
+               /* the back free fragment */
+               newblk = get_slot(info);
+               newblk->start = (void *)e;
+               newblk->size = be - e;
+
+               list_add(&newblk->list, &blk->list);
+       }
+
+       return (void *)s;
+}
+
+void *rh_alloc(rh_info_t * info, int size, const char *owner)
+{
+       struct list_head *l;
+       rh_block_t *blk;
+       rh_block_t *newblk;
+       void *start;
+
+       /* Validate size */
+       if (size <= 0)
+               return ERR_PTR(-EINVAL);
+
+       /* Align to configured alignment */
+       size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
+
+       if (assure_empty(info, 1) < 0)
+               return ERR_PTR(-ENOMEM);
+
+       blk = NULL;
+       list_for_each(l, &info->free_list) {
+               blk = list_entry(l, rh_block_t, list);
+               if (size <= blk->size)
+                       break;
+               blk = NULL;
+       }
+
+       if (blk == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       /* Just fits */
+       if (blk->size == size) {
+               /* Move from free list to taken list */
+               list_del(&blk->list);
+               blk->owner = owner;
+               start = blk->start;
+
+               attach_taken_block(info, blk);
+
+               return start;
+       }
+
+       newblk = get_slot(info);
+       newblk->start = blk->start;
+       newblk->size = size;
+       newblk->owner = owner;
+
+       /* blk still in free list, with updated start, size */
+       blk->start = (int8_t *)blk->start + size;
+       blk->size -= size;
+
+       start = newblk->start;
+
+       attach_taken_block(info, newblk);
+
+       return start;
+}
+
+/* allocate at precisely the given address */
+void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
+{
+       struct list_head *l;
+       rh_block_t *blk, *newblk1, *newblk2;
+       unsigned long s, e, m, bs, be;
+
+       /* Validate size */
+       if (size <= 0)
+               return ERR_PTR(-EINVAL);
+
+       /* The region must be aligned */
+       s = (unsigned long)start;
+       e = s + size;
+       m = info->alignment - 1;
+
+       /* Round start up */
+       s = (s + m) & ~m;
+
+       /* Round end down */
+       e = e & ~m;
+
+       if (assure_empty(info, 2) < 0)
+               return ERR_PTR(-ENOMEM);
+
+       blk = NULL;
+       list_for_each(l, &info->free_list) {
+               blk = list_entry(l, rh_block_t, list);
+               /* The range must lie entirely inside one free block */
+               bs = (unsigned long)blk->start;
+               be = (unsigned long)blk->start + blk->size;
+               if (s >= bs && e <= be)
+                       break;
+       }
+
+       if (blk == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       /* Perfect fit */
+       if (bs == s && be == e) {
+               /* Move from free list to taken list */
+               list_del(&blk->list);
+               blk->owner = owner;
+
+               start = blk->start;
+               attach_taken_block(info, blk);
+
+               return start;
+
+       }
+
+       /* blk still in free list, with updated start and/or size */
+       if (bs == s || be == e) {
+               if (bs == s)
+                       blk->start = (int8_t *)blk->start + size;
+               blk->size -= size;
+
+       } else {
+               /* The front free fragment */
+               blk->size = s - bs;
+
+               /* The back free fragment */
+               newblk2 = get_slot(info);
+               newblk2->start = (void *)e;
+               newblk2->size = be - e;
+
+               list_add(&newblk2->list, &blk->list);
+       }
+
+       newblk1 = get_slot(info);
+       newblk1->start = (void *)s;
+       newblk1->size = e - s;
+       newblk1->owner = owner;
+
+       start = newblk1->start;
+       attach_taken_block(info, newblk1);
+
+       return start;
+}
+
+int rh_free(rh_info_t * info, void *start)
+{
+       rh_block_t *blk, *blk2;
+       struct list_head *l;
+       int size;
+
+       /* Linear search for block */
+       blk = NULL;
+       list_for_each(l, &info->taken_list) {
+               blk2 = list_entry(l, rh_block_t, list);
+               if (start < blk2->start)
+                       break;
+               blk = blk2;
+       }
+
+       if (blk == NULL || start > (blk->start + blk->size))
+               return -EINVAL;
+
+       /* Remove from taken list */
+       list_del(&blk->list);
+
+       /* Get size of freed block */
+       size = blk->size;
+       attach_free_block(info, blk);
+
+       return size;
+}
+
+int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
+{
+       rh_block_t *blk;
+       struct list_head *l;
+       struct list_head *h;
+       int nr;
+
+       switch (what) {
+
+       case RHGS_FREE:
+               h = &info->free_list;
+               break;
+
+       case RHGS_TAKEN:
+               h = &info->taken_list;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* Linear search for block */
+       nr = 0;
+       list_for_each(l, h) {
+               blk = list_entry(l, rh_block_t, list);
+               if (stats != NULL && nr < max_stats) {
+                       stats->start = blk->start;
+                       stats->size = blk->size;
+                       stats->owner = blk->owner;
+                       stats++;
+               }
+               nr++;
+       }
+
+       return nr;
+}
+
+int rh_set_owner(rh_info_t * info, void *start, const char *owner)
+{
+       rh_block_t *blk, *blk2;
+       struct list_head *l;
+       int size;
+
+       /* Linear search for block */
+       blk = NULL;
+       list_for_each(l, &info->taken_list) {
+               blk2 = list_entry(l, rh_block_t, list);
+               if (start < blk2->start)
+                       break;
+               blk = blk2;
+       }
+
+       if (blk == NULL || start > (blk->start + blk->size))
+               return -EINVAL;
+
+       blk->owner = owner;
+       size = blk->size;
+
+       return size;
+}
+
+void rh_dump(rh_info_t * info)
+{
+       static rh_stats_t st[32];       /* XXX maximum 32 blocks */
+       int maxnr;
+       int i, nr;
+
+       maxnr = sizeof(st) / sizeof(st[0]);
+
+       printk(KERN_INFO
+              "info @0x%p (%d slots empty / %d max)\n",
+              info, info->empty_slots, info->max_blocks);
+
+       printk(KERN_INFO "  Free:\n");
+       nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
+       if (nr > maxnr)
+               nr = maxnr;
+       for (i = 0; i < nr; i++)
+               printk(KERN_INFO
+                      "    0x%p-0x%p (%u)\n",
+                      st[i].start, (int8_t *) st[i].start + st[i].size,
+                      st[i].size);
+       printk(KERN_INFO "\n");
+
+       printk(KERN_INFO "  Taken:\n");
+       nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
+       if (nr > maxnr)
+               nr = maxnr;
+       for (i = 0; i < nr; i++)
+               printk(KERN_INFO
+                      "    0x%p-0x%p (%u) %s\n",
+                      st[i].start, (int8_t *) st[i].start + st[i].size,
+                      st[i].size, st[i].owner != NULL ? st[i].owner : "");
+       printk(KERN_INFO "\n");
+}
+
+void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
+{
+       printk(KERN_INFO
+              "blk @0x%p: 0x%p-0x%p (%u)\n",
+              blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
+}
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
new file mode 100644 (file)
index 0000000..e79123d
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Single-step support.
+ *
+ * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/processor.h>
+
+extern char system_call_common[];
+
+/* Bits in SRR1 that are copied from MSR */
+#define MSR_MASK       0xffffffff87c0ffff
+
+/*
+ * Determine whether a conditional branch instruction would branch.
+ */
+static int branch_taken(unsigned int instr, struct pt_regs *regs)
+{
+       unsigned int bo = (instr >> 21) & 0x1f;
+       unsigned int bi;
+
+       if ((bo & 4) == 0) {
+               /* decrement counter */
+               --regs->ctr;
+               if (((bo >> 1) & 1) ^ (regs->ctr == 0))
+                       return 0;
+       }
+       if ((bo & 0x10) == 0) {
+               /* check bit from CR */
+               bi = (instr >> 16) & 0x1f;
+               if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
+                       return 0;
+       }
+       return 1;
+}
+
+/*
+ * Emulate instructions that cause a transfer of control.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+       unsigned int opcode, rd;
+       unsigned long int imm;
+
+       opcode = instr >> 26;
+       switch (opcode) {
+       case 16:        /* bc */
+               imm = (signed short)(instr & 0xfffc);
+               if ((instr & 2) == 0)
+                       imm += regs->nip;
+               regs->nip += 4;
+               if ((regs->msr & MSR_SF) == 0)
+                       regs->nip &= 0xffffffffUL;
+               if (instr & 1)
+                       regs->link = regs->nip;
+               if (branch_taken(instr, regs))
+                       regs->nip = imm;
+               return 1;
+       case 17:        /* sc */
+               /*
+                * N.B. this uses knowledge about how the syscall
+                * entry code works.  If that is changed, this will
+                * need to be changed also.
+                */
+               regs->gpr[9] = regs->gpr[13];
+               regs->gpr[11] = regs->nip + 4;
+               regs->gpr[12] = regs->msr & MSR_MASK;
+               regs->gpr[13] = (unsigned long) get_paca();
+               regs->nip = (unsigned long) &system_call_common;
+               regs->msr = MSR_KERNEL;
+               return 1;
+       case 18:        /* b */
+               imm = instr & 0x03fffffc;
+               if (imm & 0x02000000)
+                       imm -= 0x04000000;
+               if ((instr & 2) == 0)
+                       imm += regs->nip;
+               if (instr & 1) {
+                       regs->link = regs->nip + 4;
+                       if ((regs->msr & MSR_SF) == 0)
+                               regs->link &= 0xffffffffUL;
+               }
+               if ((regs->msr & MSR_SF) == 0)
+                       imm &= 0xffffffffUL;
+               regs->nip = imm;
+               return 1;
+       case 19:
+               switch (instr & 0x7fe) {
+               case 0x20:      /* bclr */
+               case 0x420:     /* bcctr */
+                       imm = (instr & 0x400)? regs->ctr: regs->link;
+                       regs->nip += 4;
+                       if ((regs->msr & MSR_SF) == 0) {
+                               regs->nip &= 0xffffffffUL;
+                               imm &= 0xffffffffUL;
+                       }
+                       if (instr & 1)
+                               regs->link = regs->nip;
+                       if (branch_taken(instr, regs))
+                               regs->nip = imm;
+                       return 1;
+               case 0x24:      /* rfid, scary */
+                       return -1;
+               }
+       case 31:
+               rd = (instr >> 21) & 0x1f;
+               switch (instr & 0x7fe) {
+               case 0xa6:      /* mfmsr */
+                       regs->gpr[rd] = regs->msr & MSR_MASK;
+                       regs->nip += 4;
+                       if ((regs->msr & MSR_SF) == 0)
+                               regs->nip &= 0xffffffffUL;
+                       return 1;
+               case 0x164:     /* mtmsrd */
+                       /* only MSR_EE and MSR_RI get changed if bit 15 set */
+                       /* mtmsrd doesn't change MSR_HV and MSR_ME */
+                       imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
+                       imm = (regs->msr & MSR_MASK & ~imm)
+                               | (regs->gpr[rd] & imm);
+                       if ((imm & MSR_RI) == 0)
+                               /* can't step mtmsrd that would clear MSR_RI */
+                               return -1;
+                       regs->msr = imm;
+                       regs->nip += 4;
+                       if ((imm & MSR_SF) == 0)
+                               regs->nip &= 0xffffffffUL;
+                       return 1;
+               }
+       }
+       return 0;
+}
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
new file mode 100644 (file)
index 0000000..36b5210
--- /dev/null
@@ -0,0 +1,23 @@
+#include <linux/ctype.h>
+
+int strcasecmp(const char *s1, const char *s2)
+{
+       int c1, c2;
+
+       do {
+               c1 = tolower(*s1++);
+               c2 = tolower(*s2++);
+       } while (c1 == c2 && c1 != 0);
+       return c1 - c2;
+}
+
+int strncasecmp(const char *s1, const char *s2, int n)
+{
+       int c1, c2;
+
+       do {
+               c1 = tolower(*s1++);
+               c2 = tolower(*s2++);
+       } while ((--n > 0) && c1 == c2 && c1 != 0);
+       return c1 - c2;
+}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
new file mode 100644 (file)
index 0000000..b9ca84e
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * String handling functions for PowerPC.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+       .section __ex_table,"a"
+#ifdef CONFIG_PPC64
+       .align  3
+#define EXTBL  .llong
+#else
+       .align  2
+#define EXTBL  .long
+#endif
+       .text
+       
+_GLOBAL(strcpy)
+       addi    r5,r3,-1
+       addi    r4,r4,-1
+1:     lbzu    r0,1(r4)
+       cmpwi   0,r0,0
+       stbu    r0,1(r5)
+       bne     1b
+       blr
+
+/* This clears out any unused part of the destination buffer,
+   just as the libc version does.  -- paulus */
+_GLOBAL(strncpy)
+       cmpwi   0,r5,0
+       beqlr
+       mtctr   r5
+       addi    r6,r3,-1
+       addi    r4,r4,-1
+1:     lbzu    r0,1(r4)
+       cmpwi   0,r0,0
+       stbu    r0,1(r6)
+       bdnzf   2,1b            /* dec ctr, branch if ctr != 0 && !cr0.eq */
+       bnelr                   /* if we didn't hit a null char, we're done */
+       mfctr   r5
+       cmpwi   0,r5,0          /* any space left in destination buffer? */
+       beqlr                   /* we know r0 == 0 here */
+2:     stbu    r0,1(r6)        /* clear it out if so */
+       bdnz    2b
+       blr
+
+_GLOBAL(strcat)
+       addi    r5,r3,-1
+       addi    r4,r4,-1
+1:     lbzu    r0,1(r5)
+       cmpwi   0,r0,0
+       bne     1b
+       addi    r5,r5,-1
+1:     lbzu    r0,1(r4)
+       cmpwi   0,r0,0
+       stbu    r0,1(r5)
+       bne     1b
+       blr
+
+_GLOBAL(strcmp)
+       addi    r5,r3,-1
+       addi    r4,r4,-1
+1:     lbzu    r3,1(r5)
+       cmpwi   1,r3,0
+       lbzu    r0,1(r4)
+       subf.   r3,r0,r3
+       beqlr   1
+       beq     1b
+       blr
+
+_GLOBAL(strlen)
+       addi    r4,r3,-1
+1:     lbzu    r0,1(r4)
+       cmpwi   0,r0,0
+       bne     1b
+       subf    r3,r3,r4
+       blr
+
+_GLOBAL(memcmp)
+       cmpwi   0,r5,0
+       ble-    2f
+       mtctr   r5
+       addi    r6,r3,-1
+       addi    r4,r4,-1
+1:     lbzu    r3,1(r6)
+       lbzu    r0,1(r4)
+       subf.   r3,r0,r3
+       bdnzt   2,1b
+       blr
+2:     li      r3,0
+       blr
+
+_GLOBAL(memchr)
+       cmpwi   0,r5,0
+       ble-    2f
+       mtctr   r5
+       addi    r3,r3,-1
+1:     lbzu    r0,1(r3)
+       cmpw    0,r0,r4
+       bdnzf   2,1b
+       beqlr
+2:     li      r3,0
+       blr
+
+_GLOBAL(__clear_user)
+       addi    r6,r3,-4
+       li      r3,0
+       li      r5,0
+       cmplwi  0,r4,4
+       blt     7f
+       /* clear a single word */
+11:    stwu    r5,4(r6)
+       beqlr
+       /* clear word sized chunks */
+       andi.   r0,r6,3
+       add     r4,r0,r4
+       subf    r6,r0,r6
+       srwi    r0,r4,2
+       andi.   r4,r4,3
+       mtctr   r0
+       bdz     7f
+1:     stwu    r5,4(r6)
+       bdnz    1b
+       /* clear byte sized chunks */
+7:     cmpwi   0,r4,0
+       beqlr
+       mtctr   r4
+       addi    r6,r6,3
+8:     stbu    r5,1(r6)
+       bdnz    8b
+       blr
+90:    mr      r3,r4
+       blr
+91:    mfctr   r3
+       slwi    r3,r3,2
+       add     r3,r3,r4
+       blr
+92:    mfctr   r3
+       blr
+
+       .section __ex_table,"a"
+       EXTBL   11b,90b
+       EXTBL   1b,91b
+       EXTBL   8b,92b
+       .text
+
+_GLOBAL(__strncpy_from_user)
+       addi    r6,r3,-1
+       addi    r4,r4,-1
+       cmpwi   0,r5,0
+       beq     2f
+       mtctr   r5
+1:     lbzu    r0,1(r4)
+       cmpwi   0,r0,0
+       stbu    r0,1(r6)
+       bdnzf   2,1b            /* dec ctr, branch if ctr != 0 && !cr0.eq */
+       beq     3f
+2:     addi    r6,r6,1
+3:     subf    r3,r3,r6
+       blr
+99:    li      r3,-EFAULT
+       blr
+
+       .section __ex_table,"a"
+       EXTBL   1b,99b
+       .text
+
+/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
+_GLOBAL(__strnlen_user)
+       addi    r7,r3,-1
+       subf    r6,r7,r5        /* top+1 - str */
+       cmplw   0,r4,r6
+       bge     0f
+       mr      r6,r4
+0:     mtctr   r6              /* ctr = min(len, top - str) */
+1:     lbzu    r0,1(r7)        /* get next byte */
+       cmpwi   0,r0,0
+       bdnzf   2,1b            /* loop if --ctr != 0 && byte != 0 */
+       addi    r7,r7,1
+       subf    r3,r3,r7        /* number of bytes we have looked at */
+       beqlr                   /* return if we found a 0 byte */
+       cmpw    0,r3,r4         /* did we look at all len bytes? */
+       blt     99f             /* if not, must have hit top */
+       addi    r3,r4,1         /* return len + 1 to indicate no null found */
+       blr
+99:    li      r3,0            /* bad address, return 0 */
+       blr
+
+       .section __ex_table,"a"
+       EXTBL   1b,99b
diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
new file mode 100644 (file)
index 0000000..5eea6f3
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Functions which are too large to be inlined.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               n = __copy_from_user(to, from, n);
+       else
+               memset(to, 0, n);
+       return n;
+}
+
+unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       if (likely(access_ok(VERIFY_WRITE, to, n)))
+               n = __copy_to_user(to, from, n);
+       return n;
+}
+
+unsigned long copy_in_user(void __user *to, const void __user *from,
+                          unsigned long n)
+{
+       might_sleep();
+       if (likely(access_ok(VERIFY_READ, from, n) &&
+           access_ok(VERIFY_WRITE, to, n)))
+               n =__copy_tofrom_user(to, from, n);
+       return n;
+}
+
+EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(copy_to_user);
+EXPORT_SYMBOL(copy_in_user);
+
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
new file mode 100644 (file)
index 0000000..3d79ce2
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Modifications by Matt Porter (mporter@mvista.com) to support
+ * PPC44x Book E processors.
+ *
+ * This file contains the routines for initializing the MMU
+ * on the 4xx series of chips.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/bootx.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+
+#include "mmu_decl.h"
+
+extern char etext[], _stext[];
+
+/* Used by the 44x TLB replacement exception handler.
+ * Just needed it declared someplace.
+ */
+unsigned int tlb_44x_index = 0;
+unsigned int tlb_44x_hwater = 62;
+
+/*
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ */
+static void __init
+ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
+{
+       unsigned long attrib = 0;
+
+       __asm__ __volatile__("\
+       clrrwi  %2,%2,10\n\
+       ori     %2,%2,%4\n\
+       clrrwi  %1,%1,10\n\
+       li      %0,0\n\
+       ori     %0,%0,%5\n\
+       tlbwe   %2,%3,%6\n\
+       tlbwe   %1,%3,%7\n\
+       tlbwe   %0,%3,%8"
+       :
+       : "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
+         "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
+         "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+         "i" (PPC44x_TLB_PAGEID),
+         "i" (PPC44x_TLB_XLAT),
+         "i" (PPC44x_TLB_ATTRIB));
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+       flush_instruction_cache();
+}
+
+unsigned long __init mmu_mapin_ram(void)
+{
+       unsigned int pinned_tlbs = 1;
+       int i;
+
+       /* Determine number of entries necessary to cover lowmem */
+       pinned_tlbs = (unsigned int)
+               (_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
+
+       /* Write upper watermark to save location */
+       tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
+
+       /* If necessary, set additional pinned TLBs */
+       if (pinned_tlbs > 1)
+               for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
+                       unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
+                       ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
+               }
+
+       return total_lowmem;
+}
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
new file mode 100644 (file)
index 0000000..b7bcbc2
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * This file contains the routines for initializing the MMU
+ * on the 4xx series of chips.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/bootx.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include "mmu_decl.h"
+
+extern int __map_without_ltlbs;
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+       /*
+        * The Zone Protection Register (ZPR) defines how protection will
+        * be applied to every page which is a member of a given zone. At
+        * present, we utilize only two of the 4xx's zones.
+        * The zone index bits (of ZSEL) in the PTE are used for software
+        * indicators, except the LSB.  For user access, zone 1 is used,
+        * for kernel access, zone 0 is used.  We set all but zone 1
+        * to zero, allowing only kernel access as indicated in the PTE.
+        * For zone 1, we set a 01 binary (a value of 10 will not work)
+        * to allow user access as indicated in the PTE.  This also allows
+        * kernel access as indicated in the PTE.
+        */
+
+        mtspr(SPRN_ZPR, 0x10000000);
+
+       flush_instruction_cache();
+
+       /*
+        * Set up the real-mode cache parameters for the exception vector
+        * handlers (which are run in real-mode).
+        */
+
+        mtspr(SPRN_DCWR, 0x00000000);  /* All caching is write-back */
+
+        /*
+        * Cache instruction and data space where the exception
+        * vectors and the kernel live in real-mode.
+        */
+
+        mtspr(SPRN_DCCR, 0xF0000000);  /* 512 MB of data space at 0x0. */
+        mtspr(SPRN_ICCR, 0xF0000000);  /* 512 MB of instr. space at 0x0. */
+}
+
+#define LARGE_PAGE_SIZE_16M    (1<<24)
+#define LARGE_PAGE_SIZE_4M     (1<<22)
+
+unsigned long __init mmu_mapin_ram(void)
+{
+       unsigned long v, s;
+       phys_addr_t p;
+
+       v = KERNELBASE;
+       p = PPC_MEMSTART;
+       s = 0;
+
+       if (__map_without_ltlbs) {
+               return s;
+       }
+
+       while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+               pmd_t *pmdp;
+               unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
+
+               spin_lock(&init_mm.page_table_lock);
+               pmdp = pmd_offset(pgd_offset_k(v), v);
+               pmd_val(*pmdp++) = val;
+               pmd_val(*pmdp++) = val;
+               pmd_val(*pmdp++) = val;
+               pmd_val(*pmdp++) = val;
+               spin_unlock(&init_mm.page_table_lock);
+
+               v += LARGE_PAGE_SIZE_16M;
+               p += LARGE_PAGE_SIZE_16M;
+               s += LARGE_PAGE_SIZE_16M;
+       }
+
+       while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+               pmd_t *pmdp;
+               unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
+
+               spin_lock(&init_mm.page_table_lock);
+               pmdp = pmd_offset(pgd_offset_k(v), v);
+               pmd_val(*pmdp) = val;
+               spin_unlock(&init_mm.page_table_lock);
+
+               v += LARGE_PAGE_SIZE_4M;
+               p += LARGE_PAGE_SIZE_4M;
+               s += LARGE_PAGE_SIZE_4M;
+       }
+
+       return s;
+}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
new file mode 100644 (file)
index 0000000..93441e7
--- /dev/null
@@ -0,0 +1,21 @@
+#
+# Makefile for the linux ppc-specific parts of the memory manager.
+#
+
+ifeq ($(CONFIG_PPC64),y)
+EXTRA_CFLAGS   += -mno-minimal-toc
+endif
+
+obj-y                          := fault.o mem.o lmb.o
+obj-$(CONFIG_PPC32)            += init_32.o pgtable_32.o mmu_context_32.o
+hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
+obj-$(CONFIG_PPC64)            += init_64.o pgtable_64.o mmu_context_64.o \
+                                  hash_utils_64.o hash_low_64.o tlb_64.o \
+                                  slb_low.o slb.o stab.o mmap.o imalloc.o \
+                                  $(hash-y)
+obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o hash_low_32.o tlb_32.o
+obj-$(CONFIG_40x)              += 4xx_mmu.o
+obj-$(CONFIG_44x)              += 44x_mmu.o
+obj-$(CONFIG_FSL_BOOKE)                += fsl_booke_mmu.o
+obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
similarity index 76%
rename from arch/ppc64/mm/fault.c
rename to arch/powerpc/mm/fault.c
index be3f25c..3df641f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  arch/ppc/mm/fault.c
  *
- *  PowerPC version 
+ *  PowerPC version
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
  *  Derived from "arch/i386/mm/fault.c"
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-#include <linux/smp_lock.h>
+#include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
 
@@ -37,6 +38,7 @@
 #include <asm/mmu_context.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include <asm/kdebug.h>
 #include <asm/siginfo.h>
 
@@ -101,10 +103,15 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
 }
 
 /*
- * The error_code parameter is
+ * For 600- and 800-family processors, the error_code parameter is DSISR
+ * for a data fault, SRR1 for an instruction fault. For 400-family processors
+ * the error_code parameter is ESR for a data fault, 0 for an instruction
+ * fault.
+ * For 64-bit processors, the error_code parameter is
  *  - DSISR for a non-SLB data access fault,
  *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
  *  - 0 any SLB fault.
+ *
  * The return value is 0 if the fault was handled, or the signal
  * number if this is a kernel fault that can't be handled here.
  */
@@ -114,12 +121,25 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        siginfo_t info;
-       unsigned long code = SEGV_MAPERR;
-       unsigned long is_write = error_code & DSISR_ISSTORE;
-       unsigned long trap = TRAP(regs);
-       unsigned long is_exec = trap == 0x400;
+       int code = SEGV_MAPERR;
+       int is_write = 0;
+       int trap = TRAP(regs);
+       int is_exec = trap == 0x400;
 
-       BUG_ON((trap == 0x380) || (trap == 0x480));
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+       /*
+        * Fortunately the bit assignments in SRR1 for an instruction
+        * fault and DSISR for a data fault are mostly the same for the
+        * bits we are interested in.  But there are some bits which
+        * indicate errors in DSISR but can validly be set in SRR1.
+        */
+       if (trap == 0x400)
+               error_code &= 0x48200000;
+       else
+               is_write = error_code & DSISR_ISSTORE;
+#else
+       is_write = error_code & ESR_DST;
+#endif /* CONFIG_4xx || CONFIG_BOOKE */
 
        if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
                                11, SIGSEGV) == NOTIFY_STOP)
@@ -134,10 +154,13 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        if (!user_mode(regs) && (address >= TASK_SIZE))
                return SIGSEGV;
 
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        if (error_code & DSISR_DABRMATCH) {
+               /* DABR match */
                do_dabr(regs, error_code);
                return 0;
        }
+#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
 
        if (in_atomic() || mm == NULL) {
                if (!user_mode(regs))
@@ -176,10 +199,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
-
-       if (vma->vm_start <= address) {
+       if (vma->vm_start <= address)
                goto good_area;
-       }
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
 
@@ -214,35 +235,76 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                    && (!user_mode(regs) || !store_updates_sp(regs)))
                        goto bad_area;
        }
-
        if (expand_stack(vma, address))
                goto bad_area;
 
 good_area:
        code = SEGV_ACCERR;
+#if defined(CONFIG_6xx)
+       if (error_code & 0x95700000)
+               /* an error such as lwarx to I/O controller space,
+                  address matching DABR, eciwx, etc. */
+               goto bad_area;
+#endif /* CONFIG_6xx */
+#if defined(CONFIG_8xx)
+        /* The MPC8xx seems to always set 0x80000000, which is
+         * "undefined".  Of those that can be set, this is the only
+         * one which seems bad.
+         */
+       if (error_code & 0x10000000)
+                /* Guarded storage error. */
+               goto bad_area;
+#endif /* CONFIG_8xx */
 
        if (is_exec) {
+#ifdef CONFIG_PPC64
                /* protection fault */
                if (error_code & DSISR_PROTFAULT)
                        goto bad_area;
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
+#endif
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+               pte_t *ptep;
+
+               /* Since 4xx/Book-E supports per-page execute permission,
+                * we lazily flush dcache to icache. */
+               ptep = NULL;
+               if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
+                       struct page *page = pte_page(*ptep);
+
+                       if (! test_bit(PG_arch_1, &page->flags)) {
+                               flush_dcache_icache_page(page);
+                               set_bit(PG_arch_1, &page->flags);
+                       }
+                       pte_update(ptep, 0, _PAGE_HWEXEC);
+                       _tlbie(address);
+                       pte_unmap(ptep);
+                       up_read(&mm->mmap_sem);
+                       return 0;
+               }
+               if (ptep != NULL)
+                       pte_unmap(ptep);
+#endif
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        /* a read */
        } else {
-               if (!(vma->vm_flags & VM_READ))
+               /* protection fault */
+               if (error_code & 0x08000000)
+                       goto bad_area;
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
 
- survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
+ survive:
        switch (handle_mm_fault(mm, vma, address, is_write)) {
 
        case VM_FAULT_MINOR:
@@ -268,15 +330,11 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               info.si_code = code;
-               info.si_addr = (void __user *) address;
-               force_sig_info(SIGSEGV, &info, current);
+               _exception(SIGSEGV, regs, code, address);
                return 0;
        }
 
-       if (trap == 0x400 && (error_code & DSISR_PROTFAULT)
+       if (is_exec && (error_code & DSISR_PROTFAULT)
            && printk_ratelimit())
                printk(KERN_CRIT "kernel tried to execute NX-protected"
                       " page (%lx) - exploit attempt? (uid: %d)\n",
@@ -315,8 +373,8 @@ do_sigbus:
 
 /*
  * bad_page_fault is called when we have a bad access from the kernel.
- * It is called from do_page_fault above and from some of the procedures
- * in traps.c.
+ * It is called from the DSI and ISI handlers in head.S and from some
+ * of the procedures in traps.c.
  */
 void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 {
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
new file mode 100644 (file)
index 0000000..af9ca0e
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Modifications by Kumar Gala (kumar.gala@freescale.com) to support
+ * E500 Book E processors.
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This file contains the routines for initializing the MMU
+ * on the Freescale BookE (e500) series of chips.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/bootx.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+
+extern void loadcam_entry(unsigned int index);
+unsigned int tlbcam_index;
+unsigned int num_tlbcam_entries;
+static unsigned long __cam0, __cam1, __cam2;
+extern unsigned long total_lowmem;
+extern unsigned long __max_low_memory;
+#define MAX_LOW_MEM    CONFIG_LOWMEM_SIZE
+
+#define NUM_TLBCAMS    (16)
+
+struct tlbcam {
+       u32     MAS0;
+       u32     MAS1;
+       u32     MAS2;
+       u32     MAS3;
+       u32     MAS7;
+} TLBCAM[NUM_TLBCAMS];
+
+struct tlbcamrange {
+       unsigned long start;
+       unsigned long limit;
+       phys_addr_t phys;
+} tlbcam_addrs[NUM_TLBCAMS];
+
+extern unsigned int tlbcam_index;
+
+/*
+ * Return PA for this VA if it is mapped by a CAM, or 0
+ */
+unsigned long v_mapped_by_tlbcam(unsigned long va)
+{
+       int b;
+       for (b = 0; b < tlbcam_index; ++b)
+               if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
+                       return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
+       return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_mapped_by_tlbcam(unsigned long pa)
+{
+       int b;
+       for (b = 0; b < tlbcam_index; ++b)
+               if (pa >= tlbcam_addrs[b].phys
+                   && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+                             +tlbcam_addrs[b].phys)
+                       return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
+       return 0;
+}
+
+/*
+ * Set up a TLB CAM entry mapping the given virtual/physical range.
+ * The parameters are not checked; in particular size must be a power
+ * of 4 between 4k and 256M.
+ */
+void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+               unsigned int size, int flags, unsigned int pid)
+{
+       unsigned int tsize, lz;
+
+       asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+       tsize = (21 - lz) / 2;
+
+#ifdef CONFIG_SMP
+       if ((flags & _PAGE_NO_CACHE) == 0)
+               flags |= _PAGE_COHERENT;
+#endif
+
+       TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
+       TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
+       TLBCAM[index].MAS2 = virt & PAGE_MASK;
+
+       TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
+       TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
+
+       TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
+       TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+
+#ifndef CONFIG_KGDB /* want user access for breakpoints */
+       if (flags & _PAGE_USER) {
+          TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+          TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+       }
+#else
+       TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+       TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+#endif
+
+       tlbcam_addrs[index].start = virt;
+       tlbcam_addrs[index].limit = virt + size - 1;
+       tlbcam_addrs[index].phys = phys;
+
+       loadcam_entry(index);
+}
+
+void invalidate_tlbcam_entry(int index)
+{
+       TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
+       TLBCAM[index].MAS1 = ~MAS1_VALID;
+
+       loadcam_entry(index);
+}
+
+void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
+               unsigned long cam2)
+{
+       settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+       tlbcam_index++;
+       if (cam1) {
+               tlbcam_index++;
+               settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+       }
+       if (cam2) {
+               tlbcam_index++;
+               settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+       }
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+       flush_instruction_cache();
+}
+
+unsigned long __init mmu_mapin_ram(void)
+{
+       cam_mapin_ram(__cam0, __cam1, __cam2);
+
+       return __cam0 + __cam1 + __cam2;
+}
+
+
+void __init
+adjust_total_lowmem(void)
+{
+       unsigned long max_low_mem = MAX_LOW_MEM;
+       unsigned long cam_max = 0x10000000;
+       unsigned long ram;
+
+       /* adjust CAM size to max_low_mem */
+       if (max_low_mem < cam_max)
+               cam_max = max_low_mem;
+
+       /* adjust lowmem size to max_low_mem */
+       if (max_low_mem < total_lowmem)
+               ram = max_low_mem;
+       else
+               ram = total_lowmem;
+
+       /* Calculate CAM values */
+       __cam0 = 1UL << 2 * (__ilog2(ram) / 2);
+       if (__cam0 > cam_max)
+               __cam0 = cam_max;
+       ram -= __cam0;
+       if (ram) {
+               __cam1 = 1UL << 2 * (__ilog2(ram) / 2);
+               if (__cam1 > cam_max)
+                       __cam1 = cam_max;
+               ram -= __cam1;
+       }
+       if (ram) {
+               __cam2 = 1UL << 2 * (__ilog2(ram) / 2);
+               if (__cam2 > cam_max)
+                       __cam2 = cam_max;
+               ram -= __cam2;
+       }
+
+       printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
+                       " CAM2=%ldMb residual: %ldMb\n",
+                       __cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
+                       (total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
+       __max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
+}
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
new file mode 100644 (file)
index 0000000..12ccd71
--- /dev/null
@@ -0,0 +1,618 @@
+/*
+ *  arch/ppc/kernel/hashtable.S
+ *
+ *  $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *
+ *  This file contains low-level assembler routines for managing
+ *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
+ *  hash table, so this file is not used on them.)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SMP
+       .comm   mmu_hash_lock,4
+#endif /* CONFIG_SMP */
+
+/*
+ * Sync CPUs with hash_page taking & releasing the hash
+ * table lock
+ */
+#ifdef CONFIG_SMP
+       .text
+_GLOBAL(hash_page_sync)
+       lis     r8,mmu_hash_lock@h
+       ori     r8,r8,mmu_hash_lock@l
+       lis     r0,0x0fff
+       b       10f
+11:    lwz     r6,0(r8)
+       cmpwi   0,r6,0
+       bne     11b
+10:    lwarx   r6,0,r8
+       cmpwi   0,r6,0
+       bne-    11b
+       stwcx.  r0,0,r8
+       bne-    10b
+       isync
+       eieio
+       li      r0,0
+       stw     r0,0(r8)
+       blr     
+#endif
+
+/*
+ * Load a PTE into the hash table, if possible.
+ * The address is in r4, and r3 contains an access flag:
+ * _PAGE_RW (0x400) if a write.
+ * r9 contains the SRR1 value, from which we use the MSR_PR bit.
+ * SPRG3 contains the physical address of the current task's thread.
+ *
+ * Returns to the caller if the access is illegal or there is no
+ * mapping for the address.  Otherwise it places an appropriate PTE
+ * in the hash table and returns from the exception.
+ * Uses r0, r3 - r8, ctr, lr.
+ */
+       .text
+_GLOBAL(hash_page)
+#ifdef CONFIG_PPC64BRIDGE
+       mfmsr   r0
+       clrldi  r0,r0,1         /* make sure it's in 32-bit mode */
+       MTMSRD(r0)
+       isync
+#endif
+       tophys(r7,0)                    /* gets -KERNELBASE into r7 */
+#ifdef CONFIG_SMP
+       addis   r8,r7,mmu_hash_lock@h
+       ori     r8,r8,mmu_hash_lock@l
+       lis     r0,0x0fff
+       b       10f
+11:    lwz     r6,0(r8)
+       cmpwi   0,r6,0
+       bne     11b
+10:    lwarx   r6,0,r8
+       cmpwi   0,r6,0
+       bne-    11b
+       stwcx.  r0,0,r8
+       bne-    10b
+       isync
+#endif
+       /* Get PTE (linux-style) and check access */
+       lis     r0,KERNELBASE@h         /* check if kernel address */
+       cmplw   0,r4,r0
+       mfspr   r8,SPRN_SPRG3           /* current task's THREAD (phys) */
+       ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
+       lwz     r5,PGDIR(r8)            /* virt page-table root */
+       blt+    112f                    /* assume user more likely */
+       lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
+       addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
+       rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
+112:   add     r5,r5,r7                /* convert to phys addr */
+       rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
+       lwz     r8,0(r5)                /* get pmd entry */
+       rlwinm. r8,r8,0,0,19            /* extract address of pte page */
+#ifdef CONFIG_SMP
+       beq-    hash_page_out           /* return if no mapping */
+#else
+       /* XXX it seems like the 601 will give a machine fault on the
+          rfi if its alignment is wrong (bottom 4 bits of address are
+          8 or 0xc) and we have had a not-taken conditional branch
+          to the address following the rfi. */
+       beqlr-
+#endif
+       rlwimi  r8,r4,22,20,29          /* insert next 10 bits of address */
+       rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
+       ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
+
+       /*
+        * Update the linux PTE atomically.  We do the lwarx up-front
+        * because almost always, there won't be a permission violation
+        * and there won't already be an HPTE, and thus we will have
+        * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
+        */
+retry:
+       lwarx   r6,0,r8                 /* get linux-style pte */
+       andc.   r5,r3,r6                /* check access & ~permission */
+#ifdef CONFIG_SMP
+       bne-    hash_page_out           /* return if access not permitted */
+#else
+       bnelr-
+#endif
+       or      r5,r0,r6                /* set accessed/dirty bits */
+       stwcx.  r5,0,r8                 /* attempt to update PTE */
+       bne-    retry                   /* retry if someone got there first */
+
+       mfsrin  r3,r4                   /* get segment reg for segment */
+       mfctr   r0
+       stw     r0,_CTR(r11)
+       bl      create_hpte             /* add the hash table entry */
+
+#ifdef CONFIG_SMP
+       eieio
+       addis   r8,r7,mmu_hash_lock@ha
+       li      r0,0
+       stw     r0,mmu_hash_lock@l(r8)
+#endif
+
+       /* Return from the exception */
+       lwz     r5,_CTR(r11)
+       mtctr   r5
+       lwz     r0,GPR0(r11)
+       lwz     r7,GPR7(r11)
+       lwz     r8,GPR8(r11)
+       b       fast_exception_return
+
+#ifdef CONFIG_SMP
+hash_page_out:
+       eieio
+       addis   r8,r7,mmu_hash_lock@ha
+       li      r0,0
+       stw     r0,mmu_hash_lock@l(r8)
+       blr
+#endif /* CONFIG_SMP */
+
+/*
+ * Add an entry for a particular page to the hash table.
+ *
+ * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
+ *
+ * We assume any necessary modifications to the pte (e.g. setting
+ * the accessed bit) have already been done and that there is actually
+ * a hash table in use (i.e. we're not on a 603).
+ */
+_GLOBAL(add_hash_page)
+       mflr    r0
+       stw     r0,4(r1)
+
+       /* Convert context and va to VSID */
+       mulli   r3,r3,897*16            /* multiply context by context skew */
+       rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
+       mulli   r0,r0,0x111             /* multiply by ESID skew */
+       add     r3,r3,r0                /* note create_hpte trims to 24 bits */
+
+#ifdef CONFIG_SMP
+       rlwinm  r8,r1,0,0,18            /* use cpu number to make tag */
+       lwz     r8,TI_CPU(r8)           /* to go in mmu_hash_lock */
+       oris    r8,r8,12
+#endif /* CONFIG_SMP */
+
+       /*
+        * We disable interrupts here, even on UP, because we don't
+        * want to race with hash_page, and because we want the
+        * _PAGE_HASHPTE bit to be a reliable indication of whether
+        * the HPTE exists (or at least whether one did once).
+        * We also turn off the MMU for data accesses so that we
+        * can't take a hash table miss (assuming the code is
+        * covered by a BAT).  -- paulus
+        */
+       mfmsr   r10
+       SYNC
+       rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
+       rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
+       mtmsr   r0
+       SYNC_601
+       isync
+
+       tophys(r7,0)
+
+#ifdef CONFIG_SMP
+       addis   r9,r7,mmu_hash_lock@ha
+       addi    r9,r9,mmu_hash_lock@l
+10:    lwarx   r0,0,r9                 /* take the mmu_hash_lock */
+       cmpi    0,r0,0
+       bne-    11f
+       stwcx.  r8,0,r9
+       beq+    12f
+11:    lwz     r0,0(r9)
+       cmpi    0,r0,0
+       beq     10b
+       b       11b
+12:    isync
+#endif
+
+       /*
+        * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
+        * If _PAGE_HASHPTE was already set, we don't replace the existing
+        * HPTE, so we just unlock and return.
+        */
+       mr      r8,r5
+       rlwimi  r8,r4,22,20,29
+1:     lwarx   r6,0,r8
+       andi.   r0,r6,_PAGE_HASHPTE
+       bne     9f                      /* if HASHPTE already set, done */
+       ori     r5,r6,_PAGE_HASHPTE
+       stwcx.  r5,0,r8
+       bne-    1b
+
+       bl      create_hpte
+
+9:
+#ifdef CONFIG_SMP
+       eieio
+       li      r0,0
+       stw     r0,0(r9)                /* clear mmu_hash_lock */
+#endif
+
+       /* reenable interrupts and DR */
+       mtmsr   r10
+       SYNC_601
+       isync
+
+       lwz     r0,4(r1)
+       mtlr    r0
+       blr
+
+/*
+ * This routine adds a hardware PTE to the hash table.
+ * It is designed to be called with the MMU either on or off.
+ * r3 contains the VSID, r4 contains the virtual address,
+ * r5 contains the linux PTE, r6 contains the old value of the
+ * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
+ * offset to be added to addresses (0 if the MMU is on,
+ * -KERNELBASE if it is off).
+ * On SMP, the caller should have the mmu_hash_lock held.
+ * We assume that the caller has (or will) set the _PAGE_HASHPTE
+ * bit in the linux PTE in memory.  The value passed in r6 should
+ * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
+ * this routine will skip the search for an existing HPTE.
+ * This procedure modifies r0, r3 - r6, r8, cr0.
+ *  -- paulus.
+ *
+ * For speed, 4 of the instructions get patched once the size and
+ * physical address of the hash table are known.  These definitions
+ * of Hash_base and Hash_bits below are just an example.
+ */
+Hash_base = 0xc0180000
+Hash_bits = 12                         /* e.g. 256kB hash table */
+Hash_msk = (((1 << Hash_bits) - 1) * 64)
+
+#ifndef CONFIG_PPC64BRIDGE
+/* defines for the PTE format for 32-bit PPCs */
+#define PTE_SIZE       8
+#define PTEG_SIZE      64
+#define LG_PTEG_SIZE   6
+#define LDPTEu         lwzu
+#define STPTE          stw
+#define CMPPTE         cmpw
+#define PTE_H          0x40
+#define PTE_V          0x80000000
+#define TST_V(r)       rlwinm. r,r,0,0,0
+#define SET_V(r)       oris r,r,PTE_V@h
+#define CLR_V(r,t)     rlwinm r,r,0,1,31
+
+#else
+/* defines for the PTE format for 64-bit PPCs */
+#define PTE_SIZE       16
+#define PTEG_SIZE      128
+#define LG_PTEG_SIZE   7
+#define LDPTEu         ldu
+#define STPTE          std
+#define CMPPTE         cmpd
+#define PTE_H          2
+#define PTE_V          1
+#define TST_V(r)       andi. r,r,PTE_V
+#define SET_V(r)       ori r,r,PTE_V
+#define CLR_V(r,t)     li t,PTE_V; andc r,r,t
+#endif /* CONFIG_PPC64BRIDGE */
+
+#define HASH_LEFT      31-(LG_PTEG_SIZE+Hash_bits-1)
+#define HASH_RIGHT     31-LG_PTEG_SIZE
+
+_GLOBAL(create_hpte)
+       /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+       rlwinm  r8,r5,32-10,31,31       /* _PAGE_RW -> PP lsb */
+       rlwinm  r0,r5,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
+       and     r8,r8,r0                /* writable if _RW & _DIRTY */
+       rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
+       ori     r8,r8,0xe14             /* clear out reserved bits and M */
+       andc    r8,r5,r8                /* PP = user? (rw&dirty? 2: 3): 0 */
+BEGIN_FTR_SECTION
+       ori     r8,r8,_PAGE_COHERENT    /* set M (coherence required) */
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+
+       /* Construct the high word of the PPC-style PTE (r5) */
+#ifndef CONFIG_PPC64BRIDGE
+       rlwinm  r5,r3,7,1,24            /* put VSID in 0x7fffff80 bits */
+       rlwimi  r5,r4,10,26,31          /* put in API (abbrev page index) */
+#else /* CONFIG_PPC64BRIDGE */
+       clrlwi  r3,r3,8                 /* reduce vsid to 24 bits */
+       sldi    r5,r3,12                /* shift vsid into position */
+       rlwimi  r5,r4,16,20,24          /* put in API (abbrev page index) */
+#endif /* CONFIG_PPC64BRIDGE */
+       SET_V(r5)                       /* set V (valid) bit */
+
+       /* Get the address of the primary PTE group in the hash table (r3) */
+_GLOBAL(hash_page_patch_A)
+       addis   r0,r7,Hash_base@h       /* base address of hash table */
+       rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
+       rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+       xor     r3,r3,r0                /* make primary hash */
+       li      r0,8                    /* PTEs/group */
+
+       /*
+        * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
+        * if it is clear, meaning that the HPTE isn't there already...
+        */
+       andi.   r6,r6,_PAGE_HASHPTE
+       beq+    10f                     /* no PTE: go look for an empty slot */
+       tlbie   r4
+
+       addis   r4,r7,htab_hash_searches@ha
+       lwz     r6,htab_hash_searches@l(r4)
+       addi    r6,r6,1                 /* count how many searches we do */
+       stw     r6,htab_hash_searches@l(r4)
+
+       /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
+       mtctr   r0
+       addi    r4,r3,-PTE_SIZE
+1:     LDPTEu  r6,PTE_SIZE(r4)         /* get next PTE */
+       CMPPTE  0,r6,r5
+       bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
+       beq+    found_slot
+
+       /* Search the secondary PTEG for a matching PTE */
+       ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
+_GLOBAL(hash_page_patch_B)
+       xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
+       xori    r4,r4,(-PTEG_SIZE & 0xffff)
+       addi    r4,r4,-PTE_SIZE
+       mtctr   r0
+2:     LDPTEu  r6,PTE_SIZE(r4)
+       CMPPTE  0,r6,r5
+       bdnzf   2,2b
+       beq+    found_slot
+       xori    r5,r5,PTE_H             /* clear H bit again */
+
+       /* Search the primary PTEG for an empty slot */
+10:    mtctr   r0
+       addi    r4,r3,-PTE_SIZE         /* search primary PTEG */
+1:     LDPTEu  r6,PTE_SIZE(r4)         /* get next PTE */
+       TST_V(r6)                       /* test valid bit */
+       bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
+       beq+    found_empty
+
+       /* update counter of times that the primary PTEG is full */
+       addis   r4,r7,primary_pteg_full@ha
+       lwz     r6,primary_pteg_full@l(r4)
+       addi    r6,r6,1
+       stw     r6,primary_pteg_full@l(r4)
+
+       /* Search the secondary PTEG for an empty slot */
+       ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
+_GLOBAL(hash_page_patch_C)
+       xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
+       xori    r4,r4,(-PTEG_SIZE & 0xffff)
+       addi    r4,r4,-PTE_SIZE
+       mtctr   r0
+2:     LDPTEu  r6,PTE_SIZE(r4)
+       TST_V(r6)
+       bdnzf   2,2b
+       beq+    found_empty
+       xori    r5,r5,PTE_H             /* clear H bit again */
+
+       /*
+        * Choose an arbitrary slot in the primary PTEG to overwrite.
+        * Since both the primary and secondary PTEGs are full, and we
+        * have no information that the PTEs in the primary PTEG are
+        * more important or useful than those in the secondary PTEG,
+        * and we know there is a definite (although small) speed
+        * advantage to putting the PTE in the primary PTEG, we always
+        * put the PTE in the primary PTEG.
+        */
+       addis   r4,r7,next_slot@ha
+       lwz     r6,next_slot@l(r4)
+       addi    r6,r6,PTE_SIZE
+       andi.   r6,r6,7*PTE_SIZE
+       stw     r6,next_slot@l(r4)
+       add     r4,r3,r6
+
+#ifndef CONFIG_SMP
+       /* Store PTE in PTEG */
+found_empty:
+       STPTE   r5,0(r4)
+found_slot:
+       STPTE   r8,PTE_SIZE/2(r4)
+
+#else /* CONFIG_SMP */
+/*
+ * Between the tlbie above and updating the hash table entry below,
+ * another CPU could read the hash table entry and put it in its TLB.
+ * There are 3 cases:
+ * 1. using an empty slot
+ * 2. updating an earlier entry to change permissions (i.e. enable write)
+ * 3. taking over the PTE for an unrelated address
+ *
+ * In each case it doesn't really matter if the other CPUs have the old
+ * PTE in their TLB.  So we don't need to bother with another tlbie here,
+ * which is convenient as we've overwritten the register that had the
+ * address. :-)  The tlbie above is mainly to make sure that this CPU comes
+ * and gets the new PTE from the hash table.
+ *
+ * We do however have to make sure that the PTE is never in an invalid
+ * state with the V bit set.
+ */
+found_empty:
+found_slot:
+       CLR_V(r5,r0)            /* clear V (valid) bit in PTE */
+       STPTE   r5,0(r4)
+       sync
+       TLBSYNC
+       STPTE   r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
+       sync
+       SET_V(r5)
+       STPTE   r5,0(r4)        /* finally set V bit in PTE */
+#endif /* CONFIG_SMP */
+
+       sync            /* make sure pte updates get to memory */
+       blr
+
+       .comm   next_slot,4
+       .comm   primary_pteg_full,4
+       .comm   htab_hash_searches,4
+
+/*
+ * Flush the entry for a particular page from the hash table.
+ *
+ * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
+ *                 int count)
+ *
+ * We assume that there is a hash table in use (Hash != 0).
+ */
+_GLOBAL(flush_hash_pages)
+       tophys(r7,0)
+
+       /*
+        * We disable interrupts here, even on UP, because we want
+        * the _PAGE_HASHPTE bit to be a reliable indication of
+        * whether the HPTE exists (or at least whether one did once).
+        * We also turn off the MMU for data accesses so that
+        * we can't take a hash table miss (assuming the code is
+        * covered by a BAT).  -- paulus
+        */
+       mfmsr   r10
+       SYNC
+       rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
+       rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
+       mtmsr   r0
+       SYNC_601
+       isync
+
+       /* First find a PTE in the range that has _PAGE_HASHPTE set */
+       rlwimi  r5,r4,22,20,29
+1:     lwz     r0,0(r5)
+       cmpwi   cr1,r6,1
+       andi.   r0,r0,_PAGE_HASHPTE
+       bne     2f
+       ble     cr1,19f
+       addi    r4,r4,0x1000
+       addi    r5,r5,4
+       addi    r6,r6,-1
+       b       1b
+
+       /* Convert context and va to VSID */
+2:     mulli   r3,r3,897*16            /* multiply context by context skew */
+       rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
+       mulli   r0,r0,0x111             /* multiply by ESID skew */
+       add     r3,r3,r0                /* note code below trims to 24 bits */
+
+       /* Construct the high word of the PPC-style PTE (r11) */
+#ifndef CONFIG_PPC64BRIDGE
+       rlwinm  r11,r3,7,1,24           /* put VSID in 0x7fffff80 bits */
+       rlwimi  r11,r4,10,26,31         /* put in API (abbrev page index) */
+#else /* CONFIG_PPC64BRIDGE */
+       clrlwi  r3,r3,8                 /* reduce vsid to 24 bits */
+       sldi    r11,r3,12               /* shift vsid into position */
+       rlwimi  r11,r4,16,20,24         /* put in API (abbrev page index) */
+#endif /* CONFIG_PPC64BRIDGE */
+       SET_V(r11)                      /* set V (valid) bit */
+
+#ifdef CONFIG_SMP
+       addis   r9,r7,mmu_hash_lock@ha
+       addi    r9,r9,mmu_hash_lock@l
+       rlwinm  r8,r1,0,0,18
+       add     r8,r8,r7
+       lwz     r8,TI_CPU(r8)
+       oris    r8,r8,9
+10:    lwarx   r0,0,r9
+       cmpi    0,r0,0
+       bne-    11f
+       stwcx.  r8,0,r9
+       beq+    12f
+11:    lwz     r0,0(r9)
+       cmpi    0,r0,0
+       beq     10b
+       b       11b
+12:    isync
+#endif
+
+       /*
+        * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
+        * already clear, we're done (for this pte).  If not,
+        * clear it (atomically) and proceed.  -- paulus.
+        */
+33:    lwarx   r8,0,r5                 /* fetch the pte */
+       andi.   r0,r8,_PAGE_HASHPTE
+       beq     8f                      /* done if HASHPTE is already clear */
+       rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
+       stwcx.  r8,0,r5                 /* update the pte */
+       bne-    33b
+
+       /* Get the address of the primary PTE group in the hash table (r3) */
+_GLOBAL(flush_hash_patch_A)
+       addis   r8,r7,Hash_base@h       /* base address of hash table */
+       rlwimi  r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
+       rlwinm  r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+       xor     r8,r0,r8                /* make primary hash */
+
+       /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
+       li      r0,8                    /* PTEs/group */
+       mtctr   r0
+       addi    r12,r8,-PTE_SIZE
+1:     LDPTEu  r0,PTE_SIZE(r12)        /* get next PTE */
+       CMPPTE  0,r0,r11
+       bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
+       beq+    3f
+
+       /* Search the secondary PTEG for a matching PTE */
+       ori     r11,r11,PTE_H           /* set H (secondary hash) bit */
+       li      r0,8                    /* PTEs/group */
+_GLOBAL(flush_hash_patch_B)
+       xoris   r12,r8,Hash_msk>>16     /* compute secondary hash */
+       xori    r12,r12,(-PTEG_SIZE & 0xffff)
+       addi    r12,r12,-PTE_SIZE
+       mtctr   r0
+2:     LDPTEu  r0,PTE_SIZE(r12)
+       CMPPTE  0,r0,r11
+       bdnzf   2,2b
+       xori    r11,r11,PTE_H           /* clear H again */
+       bne-    4f                      /* should rarely fail to find it */
+
+3:     li      r0,0
+       STPTE   r0,0(r12)               /* invalidate entry */
+4:     sync
+       tlbie   r4                      /* in hw tlb too */
+       sync
+
+8:     ble     cr1,9f                  /* if all ptes checked */
+81:    addi    r6,r6,-1
+       addi    r5,r5,4                 /* advance to next pte */
+       addi    r4,r4,0x1000
+       lwz     r0,0(r5)                /* check next pte */
+       cmpwi   cr1,r6,1
+       andi.   r0,r0,_PAGE_HASHPTE
+       bne     33b
+       bgt     cr1,81b
+
+9:
+#ifdef CONFIG_SMP
+       TLBSYNC
+       li      r0,0
+       stw     r0,0(r9)                /* clear mmu_hash_lock */
+#endif
+
+19:    mtmsr   r10
+       SYNC_601
+       isync
+       blr
similarity index 99%
rename from arch/ppc64/mm/hash_low.S
rename to arch/powerpc/mm/hash_low_64.S
index ee5a5d3..d6ed910 100644 (file)
@@ -10,7 +10,7 @@
  * described in the kernel's COPYING file.
  */
 
-#include <asm/processor.h>
+#include <asm/reg.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
similarity index 96%
rename from arch/ppc64/mm/hash_native.c
rename to arch/powerpc/mm/hash_native_64.c
index bfd385b..174d145 100644 (file)
@@ -335,10 +335,9 @@ static void native_hpte_clear(void)
        local_irq_restore(flags);
 }
 
-static void native_flush_hash_range(unsigned long context,
-                                   unsigned long number, int local)
+static void native_flush_hash_range(unsigned long number, int local)
 {
-       unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
+       unsigned long va, vpn, hash, secondary, slot, flags, avpn;
        int i, j;
        hpte_t *hptep;
        unsigned long hpte_v;
@@ -349,13 +348,7 @@ static void native_flush_hash_range(unsigned long context,
 
        j = 0;
        for (i = 0; i < number; i++) {
-               if (batch->addr[i] < KERNELBASE)
-                       vsid = get_vsid(context, batch->addr[i]);
-               else
-                       vsid = get_kernel_vsid(batch->addr[i]);
-
-               va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
-               batch->vaddr[j] = va;
+               va = batch->vaddr[j];
                if (large)
                        vpn = va >> HPAGE_SHIFT;
                else
similarity index 89%
rename from arch/ppc64/mm/hash_utils.c
rename to arch/powerpc/mm/hash_utils_64.c
index 09475c8..6e9e05c 100644 (file)
@@ -78,7 +78,7 @@ extern unsigned long dart_tablebase;
 hpte_t *htab_address;
 unsigned long htab_hash_mask;
 
-extern unsigned long _SDR1;
+unsigned long _SDR1;
 
 #define KB (1024)
 #define MB (1024*KB)
@@ -90,7 +90,6 @@ static inline void loop_forever(void)
                ;
 }
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 static inline void create_pte_mapping(unsigned long start, unsigned long end,
                                      unsigned long mode, int large)
 {
@@ -111,7 +110,7 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
                unsigned long vpn, hash, hpteg;
                unsigned long vsid = get_kernel_vsid(addr);
                unsigned long va = (vsid << 28) | (addr & 0xfffffff);
-               int ret;
+               int ret = -1;
 
                if (large)
                        vpn = va >> HPAGE_SHIFT;
@@ -129,16 +128,25 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
 
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+#ifdef CONFIG_PPC_ISERIES
+               if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
+                       ret = iSeries_hpte_bolt_or_insert(hpteg, va,
+                               virt_to_abs(addr) >> PAGE_SHIFT,
+                               vflags, tmp_mode);
+               else
+#endif
 #ifdef CONFIG_PPC_PSERIES
                if (systemcfg->platform & PLATFORM_LPAR)
                        ret = pSeries_lpar_hpte_insert(hpteg, va,
                                virt_to_abs(addr) >> PAGE_SHIFT,
                                vflags, tmp_mode);
                else
-#endif /* CONFIG_PPC_PSERIES */
+#endif
+#ifdef CONFIG_PPC_MULTIPLATFORM
                        ret = native_hpte_insert(hpteg, va,
                                virt_to_abs(addr) >> PAGE_SHIFT,
                                vflags, tmp_mode);
+#endif
 
                if (ret == -1) {
                        ppc64_terminate_msg(0x20, "create_pte_mapping");
@@ -147,6 +155,27 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
        }
 }
 
+static unsigned long get_hashtable_size(void)
+{
+       unsigned long rnd_mem_size, pteg_count;
+
+       /* If hash size wasn't obtained in prom.c, we calculate it now based on
+        * the total RAM size
+        */
+       if (ppc64_pft_size)
+               return 1UL << ppc64_pft_size;
+
+       /* round mem_size up to next power of 2 */
+       rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
+       if (rnd_mem_size < systemcfg->physicalMemorySize)
+               rnd_mem_size <<= 1;
+
+       /* # pages / 2 */
+       pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
+
+       return pteg_count << 7;
+}
+
 void __init htab_initialize(void)
 {
        unsigned long table, htab_size_bytes;
@@ -162,7 +191,7 @@ void __init htab_initialize(void)
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */ 
-       htab_size_bytes = 1UL << ppc64_pft_size;
+       htab_size_bytes = get_hashtable_size();
        pteg_count = htab_size_bytes >> 7;
 
        /* For debug, make the HTAB 1/8 as big as it normally would be. */
@@ -261,7 +290,6 @@ void __init htab_initialize(void)
 }
 #undef KB
 #undef MB
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 
 /*
  * Called by asm hashtable.S for doing lazy icache flush
@@ -355,18 +383,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        return ret;
 }
 
-void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-                    int local)
+void flush_hash_page(unsigned long va, pte_t pte, int local)
 {
-       unsigned long vsid, vpn, va, hash, secondary, slot;
+       unsigned long vpn, hash, secondary, slot;
        unsigned long huge = pte_huge(pte);
 
-       if (ea < KERNELBASE)
-               vsid = get_vsid(context, ea);
-       else
-               vsid = get_kernel_vsid(ea);
-
-       va = (vsid << 28) | (ea & 0x0fffffff);
        if (huge)
                vpn = va >> HPAGE_SHIFT;
        else
@@ -381,17 +402,17 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
        ppc_md.hpte_invalidate(slot, va, huge, local);
 }
 
-void flush_hash_range(unsigned long context, unsigned long number, int local)
+void flush_hash_range(unsigned long number, int local)
 {
        if (ppc_md.flush_hash_range) {
-               ppc_md.flush_hash_range(context, number, local);
+               ppc_md.flush_hash_range(number, local);
        } else {
                int i;
-               struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+               struct ppc64_tlb_batch *batch =
+                       &__get_cpu_var(ppc64_tlb_batch);
 
                for (i = 0; i < number; i++)
-                       flush_hash_page(context, batch->addr[i], batch->pte[i],
-                                       local);
+                       flush_hash_page(batch->vaddr[i], batch->pte[i], local);
        }
 }
 
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
new file mode 100644 (file)
index 0000000..aa6a544
--- /dev/null
@@ -0,0 +1,252 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/initrd.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/btext.h>
+#include <asm/tlb.h>
+#include <asm/bootinfo.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/sections.h>
+
+#include "mmu_decl.h"
+
+#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
+/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
+#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
+#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
+#endif
+#endif
+#define MAX_LOW_MEM    CONFIG_LOWMEM_SIZE
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+unsigned long total_memory;
+unsigned long total_lowmem;
+
+unsigned long ppc_memstart;
+unsigned long ppc_memoffset = PAGE_OFFSET;
+
+int boot_mapsize;
+#ifdef CONFIG_PPC_PMAC
+unsigned long agp_special_page;
+EXPORT_SYMBOL(agp_special_page);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+#endif
+
+void MMU_init(void);
+
+/* XXX should be in current.h  -- paulus */
+extern struct task_struct *current_set[NR_CPUS];
+
+char *klimit = _end;
+struct device_node *memory_node;
+
+extern int init_bootmem_done;
+
+/*
+ * this tells the system to map all of ram with the segregs
+ * (i.e. page tables) instead of the bats.
+ * -- Cort
+ */
+int __map_without_bats;
+int __map_without_ltlbs;
+
+/* max amount of low RAM to map in */
+unsigned long __max_low_memory = MAX_LOW_MEM;
+
+/*
+ * limit of what is accessible with initial MMU setup -
+ * 256MB usually, but only 16MB on 601.
+ */
+unsigned long __initial_memory_limit = 0x10000000;
+
+/*
+ * Check for command-line options that affect what MMU_init will do.
+ */
+void MMU_setup(void)
+{
+       /* Check for nobats option (used in mapin_ram). */
+       if (strstr(cmd_line, "nobats")) {
+               __map_without_bats = 1;
+       }
+
+       if (strstr(cmd_line, "noltlbs")) {
+               __map_without_ltlbs = 1;
+       }
+}
+
+/*
+ * MMU_init sets up the basic memory mappings for the kernel,
+ * including both RAM and possibly some I/O regions,
+ * and sets up the page tables and the MMU hardware ready to go.
+ */
+void __init MMU_init(void)
+{
+       if (ppc_md.progress)
+               ppc_md.progress("MMU:enter", 0x111);
+
+       /* 601 can only access 16MB at the moment */
+       if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+               __initial_memory_limit = 0x01000000;
+
+       /* parse args from command line */
+       MMU_setup();
+
+       if (lmb.memory.cnt > 1) {
+               lmb.memory.cnt = 1;
+               lmb_analyze();
+               printk(KERN_WARNING "Only using first contiguous memory region");
+       }
+
+       total_memory = lmb_end_of_DRAM();
+       total_lowmem = total_memory;
+
+#ifdef CONFIG_FSL_BOOKE
+       /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
+        * entries, so we need to adjust lowmem to match the amount we can map
+        * in the fixed entries */
+       adjust_total_lowmem();
+#endif /* CONFIG_FSL_BOOKE */
+       if (total_lowmem > __max_low_memory) {
+               total_lowmem = __max_low_memory;
+#ifndef CONFIG_HIGHMEM
+               total_memory = total_lowmem;
+#endif /* CONFIG_HIGHMEM */
+       }
+
+       /* Initialize the MMU hardware */
+       if (ppc_md.progress)
+               ppc_md.progress("MMU:hw init", 0x300);
+       MMU_init_hw();
+
+       /* Map in all of RAM starting at KERNELBASE */
+       if (ppc_md.progress)
+               ppc_md.progress("MMU:mapin", 0x301);
+       mapin_ram();
+
+#ifdef CONFIG_HIGHMEM
+       ioremap_base = PKMAP_BASE;
+#else
+       ioremap_base = 0xfe000000UL;    /* for now, could be 0xfffff000 */
+#endif /* CONFIG_HIGHMEM */
+       ioremap_bot = ioremap_base;
+
+       /* Map in I/O resources */
+       if (ppc_md.progress)
+               ppc_md.progress("MMU:setio", 0x302);
+       if (ppc_md.setup_io_mappings)
+               ppc_md.setup_io_mappings();
+
+       /* Initialize the context management stuff */
+       mmu_context_init();
+
+       if (ppc_md.progress)
+               ppc_md.progress("MMU:exit", 0x211);
+}
+
+/* This is only called until mem_init is done. */
+void __init *early_get_page(void)
+{
+       void *p;
+
+       if (init_bootmem_done) {
+               p = alloc_bootmem_pages(PAGE_SIZE);
+       } else {
+               p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+                                       __initial_memory_limit));
+       }
+       return p;
+}
+
+/* Free up now-unused memory */
+static void free_sec(unsigned long start, unsigned long end, const char *name)
+{
+       unsigned long cnt = 0;
+
+       while (start < end) {
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
+               free_page(start);
+               cnt++;
+               start += PAGE_SIZE;
+       }
+       if (cnt) {
+               printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
+               totalram_pages += cnt;
+       }
+}
+
+void free_initmem(void)
+{
+#define FREESEC(TYPE) \
+       free_sec((unsigned long)(&__ ## TYPE ## _begin), \
+                (unsigned long)(&__ ## TYPE ## _end), \
+                #TYPE);
+
+       printk ("Freeing unused kernel memory:");
+       FREESEC(init);
+       printk("\n");
+       ppc_md.progress = NULL;
+#undef FREESEC
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       if (start < end)
+               printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+       for (; start < end; start += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
+               free_page(start);
+               totalram_pages++;
+       }
+}
+#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
new file mode 100644 (file)
index 0000000..b0fc822
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/rtas.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/tlb.h>
+#include <asm/eeh.h>
+#include <asm/processor.h>
+#include <asm/mmzone.h>
+#include <asm/cputable.h>
+#include <asm/ppcdebug.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
+#include <asm/vdso.h>
+#include <asm/imalloc.h>
+
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
+unsigned long klimit = (unsigned long)_end;
+
+/* max amount of RAM to use */
+unsigned long __max_memory;
+
+/* info on what we think the IO hole is */
+unsigned long  io_hole_start;
+unsigned long  io_hole_size;
+
+/*
+ * Do very early mm setup.
+ */
+void __init mm_init_ppc64(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+       unsigned long i;
+#endif
+
+       ppc64_boot_msg(0x100, "MM Init");
+
+       /* This is the story of the IO hole... please, keep seated,
+        * unfortunately, we are out of oxygen masks at the moment.
+        * So we need some rough way to tell where your big IO hole
+        * is. On pmac, it's between 2G and 4G, on POWER3, it's around
+        * that area as well, on POWER4 we don't have one, etc...
+        * We need that as a "hint" when sizing the TCE table on POWER3
+ * So far, the simplest way that seems to work well enough for us is
+        * to just assume that the first discontinuity in our physical
+        * RAM layout is the IO hole. That may not be correct in the future
+        * (and isn't on iSeries but then we don't care ;)
+        */
+
+#ifndef CONFIG_PPC_ISERIES
+       for (i = 1; i < lmb.memory.cnt; i++) {
+               unsigned long base, prevbase, prevsize;
+
+               prevbase = lmb.memory.region[i-1].base;
+               prevsize = lmb.memory.region[i-1].size;
+               base = lmb.memory.region[i].base;
+               if (base > (prevbase + prevsize)) {
+                       io_hole_start = prevbase + prevsize;
+                       io_hole_size = base  - (prevbase + prevsize);
+                       break;
+               }
+       }
+#endif /* CONFIG_PPC_ISERIES */
+       if (io_hole_start)
+               printk("IO Hole assumed to be %lx -> %lx\n",
+                      io_hole_start, io_hole_start + io_hole_size - 1);
+
+       ppc64_boot_msg(0x100, "MM Init Done");
+}
+
+void free_initmem(void)
+{
+       unsigned long addr;
+
+       addr = (unsigned long)__init_begin;
+       for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
+               memset((void *)addr, 0xcc, PAGE_SIZE);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
+               free_page(addr);
+               totalram_pages++;
+       }
+       printk ("Freeing unused kernel memory: %luk freed\n",
+               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       if (start < end)
+               printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+       for (; start < end; start += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
+               free_page(start);
+               totalram_pages++;
+       }
+}
+#endif
+
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+       int i;
+
+       for (i=0; i < lmb.memory.cnt; i++) {
+               unsigned long base, size;
+               struct kcore_list *kcore_mem;
+
+               base = lmb.memory.region[i].base;
+               size = lmb.memory.region[i].size;
+
+               /* GFP_ATOMIC to avoid might_sleep warnings during boot */
+               kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+               if (!kcore_mem)
+                       panic("mem_init: kmalloc failed\n");
+
+               kclist_add(kcore_mem, __va(base), size);
+       }
+
+       kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
+
+       return 0;
+}
+module_init(setup_kcore);
+
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+{
+       memset(addr, 0, kmem_cache_size(cache));
+}
+
+static const int pgtable_cache_size[2] = {
+       PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+       "pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
+void pgtable_cache_init(void)
+{
+       int i;
+
+       BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+       BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+       BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+       BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+       for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+               int size = pgtable_cache_size[i];
+               const char *name = pgtable_cache_name[i];
+
+               pgtable_cache[i] = kmem_cache_create(name,
+                                                    size, size,
+                                                    SLAB_HWCACHE_ALIGN
+                                                    | SLAB_MUST_HWCACHE_ALIGN,
+                                                    zero_ctor,
+                                                    NULL);
+               if (! pgtable_cache[i])
+                       panic("pgtable_cache_init(): could not create %s!\n",
+                             name);
+       }
+}
similarity index 69%
rename from arch/ppc64/kernel/lmb.c
rename to arch/powerpc/mm/lmb.c
index 5adaca2..9b5aa68 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Procedures for interfacing to Open Firmware.
+ * Procedures for maintaining information about logical memory blocks.
  *
  * Peter Bergner, IBM Corp.    June 2001.
  * Copyright (C) 2001 Peter Bergner.
@@ -18,7 +18,9 @@
 #include <asm/page.h>
 #include <asm/prom.h>
 #include <asm/lmb.h>
-#include <asm/abs_addr.h>
+#ifdef CONFIG_PPC32
+#include "mmu_decl.h"          /* for __max_low_memory */
+#endif
 
 struct lmb lmb;
 
@@ -54,16 +56,14 @@ void lmb_dump_all(void)
 #endif /* DEBUG */
 }
 
-static unsigned long __init
-lmb_addrs_overlap(unsigned long base1, unsigned long size1,
-                  unsigned long base2, unsigned long size2)
+static unsigned long __init lmb_addrs_overlap(unsigned long base1,
+               unsigned long size1, unsigned long base2, unsigned long size2)
 {
        return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long __init
-lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
-                  unsigned long base2, unsigned long size2)
+static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
+               unsigned long base2, unsigned long size2)
 {
        if (base2 == base1 + size1)
                return 1;
@@ -73,8 +73,8 @@ lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
        return 0;
 }
 
-static long __init
-lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
+static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+               unsigned long r1, unsigned long r2)
 {
        unsigned long base1 = rgn->region[r1].base;
        unsigned long size1 = rgn->region[r1].size;
@@ -85,8 +85,8 @@ lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init
-lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+               unsigned long r1, unsigned long r2)
 {
        unsigned long i;
 
@@ -99,8 +99,7 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
 }
 
 /* This routine called with relocation disabled. */
-void __init
-lmb_init(void)
+void __init lmb_init(void)
 {
        /* Create a dummy zero size LMB which will get coalesced away later.
         * This simplifies the lmb_add() code below...
@@ -115,9 +114,8 @@ lmb_init(void)
        lmb.reserved.cnt = 1;
 }
 
-/* This routine called with relocation disabled. */
-void __init
-lmb_analyze(void)
+/* This routine may be called with relocation disabled. */
+void __init lmb_analyze(void)
 {
        int i;
 
@@ -128,8 +126,8 @@ lmb_analyze(void)
 }
 
 /* This routine called with relocation disabled. */
-static long __init
-lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
+static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
+                                 unsigned long size)
 {
        unsigned long i, coalesced = 0;
        long adjacent;
@@ -158,18 +156,17 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
                coalesced++;
        }
 
-       if ( coalesced ) {
+       if (coalesced)
                return coalesced;
-       } else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
+       if (rgn->cnt >= MAX_LMB_REGIONS)
                return -1;
-       }
 
        /* Couldn't coalesce the LMB, so add it to the sorted table. */
-       for (i=rgn->cnt-1; i >= 0; i--) {
+       for (i = rgn->cnt-1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i+1].base = rgn->region[i].base;
                        rgn->region[i+1].size = rgn->region[i].size;
-               }  else {
+               } else {
                        rgn->region[i+1].base = base;
                        rgn->region[i+1].size = size;
                        break;
@@ -180,30 +177,28 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
        return 0;
 }
 
-/* This routine called with relocation disabled. */
-long __init
-lmb_add(unsigned long base, unsigned long size)
+/* Add [base, base+size) to the lmb.memory list (coalescing where
+ * possible); may be called with relocation disabled. */
+long __init lmb_add(unsigned long base, unsigned long size)
 {
        struct lmb_region *_rgn = &(lmb.memory);
 
        /* On pSeries LPAR systems, the first LMB is our RMO region. */
-       if ( base == 0 )
+       if (base == 0)
                lmb.rmo_size = size;
 
        return lmb_add_region(_rgn, base, size);
 
 }
 
-long __init
-lmb_reserve(unsigned long base, unsigned long size)
+/* Mark [base, base+size) as reserved so lmb_alloc*() will not hand it out. */
+long __init lmb_reserve(unsigned long base, unsigned long size)
 {
        struct lmb_region *_rgn = &(lmb.reserved);
 
        return lmb_add_region(_rgn, base, size);
 }
 
-long __init
-lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
+long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
+                               unsigned long size)
 {
        unsigned long i;
 
@@ -218,39 +213,44 @@ lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long si
        return (i < rgn->cnt) ? i : -1;
 }
 
-unsigned long __init
-lmb_alloc(unsigned long size, unsigned long align)
+/* Allocate size bytes, aligned to align, anywhere in memory.
+ * Returns the physical base address, or 0 on failure. */
+unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
 {
        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
 }
 
-unsigned long __init
-lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
+unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
+                                   unsigned long max_addr)
 {
        long i, j;
        unsigned long base = 0;
 
-       for (i=lmb.memory.cnt-1; i >= 0; i--) {
+#ifdef CONFIG_PPC32
+       /* On 32-bit, make sure we allocate lowmem */
+       if (max_addr == LMB_ALLOC_ANYWHERE)
+               max_addr = __max_low_memory;
+#endif
+       for (i = lmb.memory.cnt-1; i >= 0; i--) {
                unsigned long lmbbase = lmb.memory.region[i].base;
                unsigned long lmbsize = lmb.memory.region[i].size;
 
-               if ( max_addr == LMB_ALLOC_ANYWHERE )
-                       base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
-               else if ( lmbbase < max_addr )
-                       base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
-               else
+               if (max_addr == LMB_ALLOC_ANYWHERE)
+                       base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
+               else if (lmbbase < max_addr) {
+                       base = min(lmbbase + lmbsize, max_addr);
+                       base = _ALIGN_DOWN(base - size, align);
+               } else
                        continue;
 
-               while ( (lmbbase <= base) &&
-                       ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
-                       base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
-               }
+               while ((lmbbase <= base) &&
+                      ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+                       base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
+                                          align);
 
-               if ( (base != 0) && (lmbbase <= base) )
+               if ((base != 0) && (lmbbase <= base))
                        break;
        }
 
-       if ( i < 0 )
+       if (i < 0)
                return 0;
 
        lmb_add_region(&lmb.reserved, base, size);
@@ -259,14 +259,12 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
 }
 
 /* You must call lmb_analyze() before this. */
-unsigned long __init
-lmb_phys_mem_size(void)
+/* Total bytes of RAM known to the LMB code (sum computed by lmb_analyze()). */
+unsigned long __init lmb_phys_mem_size(void)
 {
        return lmb.memory.size;
 }
 
-unsigned long __init
-lmb_end_of_DRAM(void)
+unsigned long __init lmb_end_of_DRAM(void)
 {
        int idx = lmb.memory.cnt - 1;
 
@@ -277,9 +275,8 @@ lmb_end_of_DRAM(void)
  * Truncate the lmb list to memory_limit if it's set
  * You must call lmb_analyze() after this.
  */
-void __init lmb_enforce_memory_limit(void)
+void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 {
-       extern unsigned long memory_limit;
        unsigned long i, limit;
 
        if (! memory_limit)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
new file mode 100644 (file)
index 0000000..5e92067
--- /dev/null
@@ -0,0 +1,484 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/initrd.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/btext.h>
+#include <asm/tlb.h>
+#include <asm/bootinfo.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/sections.h>
+#ifdef CONFIG_PPC64
+#include <asm/vdso.h>
+#endif
+
+#include "mmu_decl.h"
+
+#ifndef CPU_FTR_COHERENT_ICACHE
+#define CPU_FTR_COHERENT_ICACHE        0       /* XXX for now */
+#define CPU_FTR_NOEXECUTE      0
+#endif
+
+int init_bootmem_done;
+int mem_init_done;
+
+/*
+ * This is called by /dev/mem to know if a given address has to
+ * be mapped non-cacheable or not.
+ * Returns 1 if the page frame lies in RAM, 0 otherwise.
+ */
+int page_is_ram(unsigned long pfn)
+{
+       unsigned long paddr = (pfn << PAGE_SHIFT);
+
+#ifndef CONFIG_PPC64   /* XXX for now */
+       /* 32-bit: RAM is the single contiguous range below high_memory */
+       return paddr < __pa(high_memory);
+#else
+       int i;
+       /* 64-bit: RAM may be discontiguous, so scan the LMB region list */
+       for (i=0; i < lmb.memory.cnt; i++) {
+               unsigned long base;
+
+               base = lmb.memory.region[i].base;
+
+               if ((paddr >= base) &&
+                       (paddr < (base + lmb.memory.region[i].size))) {
+                       return 1;
+               }
+       }
+
+       return 0;
+#endif
+}
+EXPORT_SYMBOL(page_is_ram);
+
+/*
+ * Page protection to use when mapping physical memory (e.g. /dev/mem).
+ * Defers to the platform hook when one is registered; otherwise any
+ * non-RAM address is mapped guarded and non-cacheable.
+ */
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (ppc_md.phys_mem_access_prot)
+               return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
+
+       if (!page_is_ram(addr >> PAGE_SHIFT))
+               vma_prot = __pgprot(pgprot_val(vma_prot)
+                                   | _PAGE_GUARDED | _PAGE_NO_CACHE);
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+
+/* Print a summary of page usage (free, highmem, reserved, shared,
+ * swap-cached) across all nodes to the console. */
+void show_mem(void)
+{
+       unsigned long total = 0, reserved = 0;
+       unsigned long shared = 0, cached = 0;
+       unsigned long highmem = 0;
+       struct page *page;
+       pg_data_t *pgdat;
+       unsigned long i;
+
+       printk("Mem-info:\n");
+       show_free_areas();
+       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       for_each_pgdat(pgdat) {
+               for (i = 0; i < pgdat->node_spanned_pages; i++) {
+                       page = pgdat_page_nr(pgdat, i);
+                       total++;
+                       if (PageHighMem(page))
+                               highmem++;
+                       if (PageReserved(page))
+                               reserved++;
+                       else if (PageSwapCache(page))
+                               cached++;
+                       else if (page_count(page))
+                               /* references beyond the first count as sharers */
+                               shared += page_count(page) - 1;
+               }
+       }
+       printk("%ld pages of RAM\n", total);
+#ifdef CONFIG_HIGHMEM
+       printk("%ld pages of HIGHMEM\n", highmem);
+#endif
+       printk("%ld reserved pages\n", reserved);
+       printk("%ld pages shared\n", shared);
+       printk("%ld pages swap cached\n", cached);
+}
+
+/*
+ * Initialize the bootmem system and give it all the memory we
+ * have available.  If we are using highmem, we only put the
+ * lowmem into the bootmem system.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+void __init do_init_bootmem(void)
+{
+       unsigned long i;
+       unsigned long start, bootmap_pages;
+       unsigned long total_pages;
+       int boot_mapsize;
+
+       max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       /* bootmem manages lowmem only; highmem pages are freed by hand
+        * later, in mem_init() */
+#ifdef CONFIG_HIGHMEM
+       total_pages = total_lowmem >> PAGE_SHIFT;
+#endif
+
+       /*
+        * Find an area to use for the bootmem bitmap.  Calculate the size of
+        * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
+        * Add 1 additional page in case the address isn't page-aligned.
+        */
+       bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+       start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+       BUG_ON(!start);
+
+       boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+
+       /* Add all physical memory to the bootmem map, mark each area
+        * present.
+        */
+       for (i = 0; i < lmb.memory.cnt; i++) {
+               unsigned long base = lmb.memory.region[i].base;
+               unsigned long size = lmb_size_bytes(&lmb.memory, i);
+               /* clip each region to lowmem when highmem is configured */
+#ifdef CONFIG_HIGHMEM
+               if (base >= total_lowmem)
+                       continue;
+               if (base + size > total_lowmem)
+                       size = total_lowmem - base;
+#endif
+               free_bootmem(base, size);
+       }
+
+       /* reserve the sections we're already using */
+       for (i = 0; i < lmb.reserved.cnt; i++)
+               reserve_bootmem(lmb.reserved.region[i].base,
+                               lmb_size_bytes(&lmb.reserved, i));
+
+       /* XXX need to clip this if using highmem? */
+       for (i = 0; i < lmb.memory.cnt; i++)
+               memory_present(0, lmb_start_pfn(&lmb.memory, i),
+                              lmb_end_pfn(&lmb.memory, i));
+       init_bootmem_done = 1;
+}
+
+/*
+ * paging_init() sets up the page tables - in fact we've already done this.
+ * Here we only set up the kmap/pkmap PTEs (when highmem is configured)
+ * and hand the zone sizes to the buddy allocator.
+ */
+void __init paging_init(void)
+{
+       unsigned long zones_size[MAX_NR_ZONES];
+       unsigned long zholes_size[MAX_NR_ZONES];
+       unsigned long total_ram = lmb_phys_mem_size();
+       unsigned long top_of_ram = lmb_end_of_DRAM();
+
+#ifdef CONFIG_HIGHMEM
+       map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
+       pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
+                       (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
+       map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
+       kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
+                       (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
+       kmap_prot = PAGE_KERNEL;
+#endif /* CONFIG_HIGHMEM */
+
+       printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+              top_of_ram, total_ram);
+       printk(KERN_INFO "Memory hole size: %ldMB\n",
+              (top_of_ram - total_ram) >> 20);
+       /*
+        * All pages are DMA-able so we put them all in the DMA zone.
+        */
+       memset(zones_size, 0, sizeof(zones_size));
+       memset(zholes_size, 0, sizeof(zholes_size));
+
+       /* NOTE(review): zones_size[ZONE_DMA] below is overwritten on both
+        * paths of the following #ifdef (and zholes_size[ZONE_DMA] on the
+        * #else path), so these two assignments are largely redundant —
+        * candidate for cleanup. */
+       zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+       zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+
+#ifdef CONFIG_HIGHMEM
+       zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+       zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
+       zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+#else
+       zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+       zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+#endif /* CONFIG_HIGHMEM */
+
+       free_area_init_node(0, NODE_DATA(0), zones_size,
+                           __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+}
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
+
+/*
+ * Final memory setup: hand all bootmem pages to the buddy allocator,
+ * free the highmem pages bootmem never managed, count reserved pages
+ * and print the memory banner.
+ */
+void __init mem_init(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       int nid;
+#endif
+       pg_data_t *pgdat;
+       unsigned long i;
+       struct page *page;
+       unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
+
+       num_physpages = max_pfn;        /* RAM is assumed contiguous */
+       high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+        for_each_online_node(nid) {
+               if (NODE_DATA(nid)->node_spanned_pages != 0) {
+                       printk("freeing bootmem node %x\n", nid);
+                       totalram_pages +=
+                               free_all_bootmem_node(NODE_DATA(nid));
+               }
+       }
+#else
+       max_mapnr = num_physpages;
+       totalram_pages += free_all_bootmem();
+#endif
+       /* count reserved pages across all nodes for the banner below */
+       for_each_pgdat(pgdat) {
+               for (i = 0; i < pgdat->node_spanned_pages; i++) {
+                       page = pgdat_page_nr(pgdat, i);
+                       if (PageReserved(page))
+                               reservedpages++;
+               }
+       }
+
+       codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
+       datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
+       initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+       bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
+
+#ifdef CONFIG_HIGHMEM
+       {
+               unsigned long pfn, highmem_mapnr;
+
+               /* highmem pages were excluded from bootmem in
+                * do_init_bootmem(); release them to the buddy allocator
+                * by hand */
+               highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+               for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+                       struct page *page = pfn_to_page(pfn);
+
+                       ClearPageReserved(page);
+                       set_page_count(page, 1);
+                       __free_page(page);
+                       totalhigh_pages++;
+               }
+               totalram_pages += totalhigh_pages;
+               printk(KERN_INFO "High memory: %luk\n",
+                      totalhigh_pages << (PAGE_SHIFT-10));
+       }
+#endif /* CONFIG_HIGHMEM */
+
+       printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
+              "%luk reserved, %luk data, %luk bss, %luk init)\n",
+               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+               num_physpages << (PAGE_SHIFT-10),
+               codesize >> 10,
+               reservedpages << (PAGE_SHIFT-10),
+               datasize >> 10,
+               bsssize >> 10,
+               initsize >> 10);
+
+       mem_init_done = 1;
+
+#ifdef CONFIG_PPC64
+       /* Initialize the vDSO */
+       vdso_init();
+#endif
+}
+
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean.  We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ * No-op on CPUs whose i-cache is coherent with the d-cache.
+ */
+void flush_dcache_page(struct page *page)
+{
+       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               return;
+       /* avoid an atomic op if possible */
+       if (test_bit(PG_arch_1, &page->flags))
+               clear_bit(PG_arch_1, &page->flags);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/* Flush a page out of the d-cache and invalidate the corresponding
+ * i-cache lines, picking the right mapping strategy per platform. */
+void flush_dcache_icache_page(struct page *page)
+{
+#ifdef CONFIG_BOOKE
+       /* BookE may have the page in highmem, so map it temporarily */
+       void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+       __flush_dcache_icache(start);
+       kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+       /* On 8xx there is no need to kmap since highmem is not supported */
+       __flush_dcache_icache(page_address(page)); 
+#else
+       /* flush by physical address, no mapping required */
+       __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
+#endif
+
+}
+/* Zero a page destined for userspace and mark it not i-cache clean
+ * (unless the CPU's i-cache is coherent). */
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+       clear_page(page);
+
+       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               return;
+       /*
+        * We shouldn't have to do this, but some versions of glibc
+        * require it (ld.so assumes zero filled pages are icache clean)
+        * - Anton
+        */
+
+       /* avoid an atomic op if possible */
+       if (test_bit(PG_arch_1, &pg->flags))
+               clear_bit(PG_arch_1, &pg->flags);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+/* Copy a page for userspace and mark the destination not i-cache clean
+ * (unless the CPU's i-cache is coherent). */
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+                   struct page *pg)
+{
+       copy_page(vto, vfrom);
+
+       /*
+        * We should be able to use the following optimisation, however
+        * there are two problems.
+        * Firstly a bug in some versions of binutils meant PLT sections
+        * were not marked executable.
+        * Secondly the first word in the GOT section is blrl, used
+        * to establish the GOT address. Until recently the GOT was
+        * not marked executable.
+        * - Anton
+        */
+#if 0
+       if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+               return;
+#endif
+
+       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               return;
+
+       /* avoid an atomic op if possible */
+       if (test_bit(PG_arch_1, &pg->flags))
+               clear_bit(PG_arch_1, &pg->flags);
+}
+
+/* Flush the i-cache for len bytes of page starting at user offset addr,
+ * via a temporary kernel mapping of the page. */
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+                            unsigned long addr, int len)
+{
+       unsigned long maddr;
+
+       /* kernel address of the same bytes within the page */
+       maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
+       flush_icache_range(maddr, maddr + len);
+       kunmap(page);
+}
+EXPORT_SYMBOL(flush_icache_user_range);
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ * We use it to preload an HPTE into the hash table corresponding to
+ * the updated linux PTE.  It also keeps the i-cache coherent by
+ * flushing the page if it has not yet been marked clean.
+ *
+ * This must always be called with the mm->page_table_lock held
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+                     pte_t pte)
+{
+       /* pfn of the page just mapped; used for i-cache maintenance below */
+       unsigned long pfn = pte_pfn(pte);
+#ifdef CONFIG_PPC32
+       pmd_t *pmd;
+#else
+       unsigned long vsid;
+       void *pgdir;
+       pte_t *ptep;
+       int local = 0;
+       cpumask_t tmp;
+       unsigned long flags;
+#endif
+
+       /* handle i-cache coherency */
+       if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+           !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           pfn_valid(pfn)) {
+               struct page *page = pfn_to_page(pfn);
+               if (!PageReserved(page)
+                   && !test_bit(PG_arch_1, &page->flags)) {
+                       if (vma->vm_mm == current->active_mm) {
+#ifdef CONFIG_8xx
+                       /* On 8xx, cache control instructions (particularly 
+                        * "dcbst" from flush_dcache_icache) fault as write 
+                        * operation if there is an unpopulated TLB entry 
+                        * for the address in question. To workaround that, 
+                        * we invalidate the TLB here, thus avoiding dcbst 
+                        * misbehaviour.
+                        */
+                               _tlbie(address);
+#endif
+                               __flush_dcache_icache((void *) address);
+                       } else
+                               flush_dcache_icache_page(page);
+                       set_bit(PG_arch_1, &page->flags);
+               }
+       }
+
+       /* preload the hash table with an entry for this translation */
+#ifdef CONFIG_PPC_STD_MMU
+       /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
+       if (!pte_young(pte) || address >= TASK_SIZE)
+               return;
+#ifdef CONFIG_PPC32
+       if (Hash == 0)
+               return;
+       pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
+       if (!pmd_none(*pmd))
+               add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
+#else
+       pgdir = vma->vm_mm->pgd;
+       if (pgdir == NULL)
+               return;
+
+       ptep = find_linux_pte(pgdir, address);
+       if (!ptep)
+               return;
+
+       vsid = get_vsid(vma->vm_mm->context.id, address);
+
+       local_irq_save(flags);
+       tmp = cpumask_of_cpu(smp_processor_id());
+       if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
+               local = 1;
+
+       __hash_page(address, 0, vsid, ptep, 0x300, local);
+       local_irq_restore(flags);
+#endif
+#endif
+}
similarity index 100%
rename from arch/ppc64/mm/mmap.c
rename to arch/powerpc/mm/mmap.c
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
new file mode 100644 (file)
index 0000000..a8816e0
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification.  This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+#ifdef FEW_CONTEXTS
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+void steal_context(void);
+#endif /* FEW_CONTEXTS */
+
+/*
+ * Initialize the context management stuff.
+ * Marks contexts below FIRST_CONTEXT as permanently allocated in the
+ * bitmap and, when contexts are scarce (FEW_CONTEXTS), primes the
+ * free-context counter.
+ */
+void __init
+mmu_context_init(void)
+{
+       /*
+        * Some processors have too few contexts to reserve one for
+        * init_mm, and require using context 0 for a normal task.
+        * Other processors reserve the use of context zero for the kernel.
+        * This code assumes FIRST_CONTEXT < 32.
+        */
+       context_map[0] = (1 << FIRST_CONTEXT) - 1;
+       next_mmu_context = FIRST_CONTEXT;
+#ifdef FEW_CONTEXTS
+       atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+#endif /* FEW_CONTEXTS */
+}
+
+#ifdef FEW_CONTEXTS
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP.  If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :).  This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ *  -- paulus
+ */
+void
+steal_context(void)
+{
+       struct mm_struct *mm;
+
+       /* free up context `next_mmu_context' */
+       /* if we shouldn't free context 0, don't... */
+       if (next_mmu_context < FIRST_CONTEXT)
+               next_mmu_context = FIRST_CONTEXT;
+       mm = context_mm[next_mmu_context];
+       /* flush the victim's TLB entries and hand its context number back */
+       flush_tlb_mm(mm);
+       destroy_context(mm);
+}
+#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
new file mode 100644 (file)
index 0000000..714a84d
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ *  MMU context allocation for 64-bit kernels.
+ *
+ *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+
+#include <asm/mmu_context.h>
+
+static DEFINE_SPINLOCK(mmu_context_lock);
+static DEFINE_IDR(mmu_context_idr);
+
+/*
+ * Allocate a fresh context id (>= 1) for a new mm via the IDR.
+ * Returns 0 on success with mm->context.id set, or -ENOMEM if the
+ * allocator is out of memory or the id would exceed MAX_CONTEXT.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+       int index;
+       int err;
+
+again:
+       if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+               return -ENOMEM;
+
+       spin_lock(&mmu_context_lock);
+       err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+       spin_unlock(&mmu_context_lock);
+
+       /* idr_get_new_above() can race with other allocators; retry */
+       if (err == -EAGAIN)
+               goto again;
+       else if (err)
+               return err;
+
+       /* ids above MAX_CONTEXT don't fit the hardware; give it back */
+       if (index > MAX_CONTEXT) {
+               idr_remove(&mmu_context_idr, index);
+               return -ENOMEM;
+       }
+
+       mm->context.id = index;
+
+       return 0;
+}
+
+/* Release the context id allocated by init_new_context(). */
+void destroy_context(struct mm_struct *mm)
+{
+       spin_lock(&mmu_context_lock);
+       idr_remove(&mmu_context_idr, mm->context.id);
+       spin_unlock(&mmu_context_lock);
+
+       mm->context.id = NO_CONTEXT;
+}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
new file mode 100644 (file)
index 0000000..a4d7a32
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Declarations of procedures and variables shared between files
+ * in arch/ppc/mm/.
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC32
+extern void mapin_ram(void);
+extern int map_page(unsigned long va, phys_addr_t pa, int flags);
+extern void setbat(int index, unsigned long virt, unsigned long phys,
+                  unsigned int size, int flags);
+extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+                     unsigned int size, int flags, unsigned int pid);
+extern void invalidate_tlbcam_entry(int index);
+
+extern int __map_without_bats;
+extern unsigned long ioremap_base;
+extern unsigned long ioremap_bot;
+extern unsigned int rtas_data, rtas_size;
+
+extern PTE *Hash, *Hash_end;
+extern unsigned long Hash_size, Hash_mask;
+
+extern unsigned int num_tlbcam_entries;
+#endif
+
+extern unsigned long __max_low_memory;
+extern unsigned long __initial_memory_limit;
+extern unsigned long total_memory;
+extern unsigned long total_lowmem;
+
+/* ...and now those things that may be slightly different between processor
+ * architectures.  -- Dan
+ */
+#if defined(CONFIG_8xx)
+#define flush_HPTE(X, va, pg)  _tlbie(va)
+#define MMU_init_hw()          do { } while(0)
+#define mmu_mapin_ram()                (0UL)
+
+#elif defined(CONFIG_4xx)
+#define flush_HPTE(X, va, pg)  _tlbie(va)
+extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
+
+#elif defined(CONFIG_FSL_BOOKE)
+#define flush_HPTE(X, va, pg)  _tlbie(va)
+extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
+extern void adjust_total_lowmem(void);
+
+#elif defined(CONFIG_PPC32)
+/* anything 32-bit except 4xx or 8xx */
+extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
+
+/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
+ * which includes all new 82xx processors.  We need tlbie/tlbsync here
+ * in that case (I think). -- Dan.
+ */
+static inline void flush_HPTE(unsigned context, unsigned long va,
+                             unsigned long pdval)
+{
+       /* hash-table CPUs: invalidate the HPTE(s) for va;
+        * otherwise a plain tlbie suffices */
+       if ((Hash != 0) &&
+           cpu_has_feature(CPU_FTR_HPTE_TABLE))
+               flush_hash_pages(0, va, pdval, 1);
+       else
+               _tlbie(va);
+}
+#endif
similarity index 100%
rename from arch/ppc64/mm/numa.c
rename to arch/powerpc/mm/numa.c
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
new file mode 100644 (file)
index 0000000..5792e53
--- /dev/null
@@ -0,0 +1,469 @@
+/*
+ * This file contains the routines setting up the linux page tables.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+
+#include "mmu_decl.h"
+
+unsigned long ioremap_base;
+unsigned long ioremap_bot;
+int io_bat_index;
+
+#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
+#define HAVE_BATS      1
+#endif
+
+#if defined(CONFIG_FSL_BOOKE)
+#define HAVE_TLBCAM    1
+#endif
+
+extern char etext[], _stext[];
+
+#ifdef CONFIG_SMP
+extern void hash_page_sync(void);
+#endif
+
+#ifdef HAVE_BATS
+extern unsigned long v_mapped_by_bats(unsigned long va);
+extern unsigned long p_mapped_by_bats(unsigned long pa);
+void setbat(int index, unsigned long virt, unsigned long phys,
+           unsigned int size, int flags);
+
+#else /* !HAVE_BATS */
+#define v_mapped_by_bats(x)    (0UL)
+#define p_mapped_by_bats(x)    (0UL)
+#endif /* HAVE_BATS */
+
+#ifdef HAVE_TLBCAM
+extern unsigned int tlbcam_index;
+extern unsigned long v_mapped_by_tlbcam(unsigned long va);
+extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+#else /* !HAVE_TLBCAM */
+#define v_mapped_by_tlbcam(x)  (0UL)
+#define p_mapped_by_tlbcam(x)  (0UL)
+#endif /* HAVE_TLBCAM */
+
+#ifdef CONFIG_PTE_64BIT
+/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+#define PGDIR_ORDER    1
+#else
+#define PGDIR_ORDER    0
+#endif
+
+/* Allocate a zeroed page directory for a new mm.  PGDIR_ORDER is 1
+ * (8kB pgdir) when Linux PTEs are 64-bit, 0 otherwise (see above). */
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *ret;
+
+       ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+       return ret;
+}
+
+/* Release a page directory obtained from pgd_alloc(). */
+void pgd_free(pgd_t *pgd)
+{
+       free_pages((unsigned long)pgd, PGDIR_ORDER);
+}
+
+/* Allocate one zeroed kernel PTE page.  Before mem_init() the page
+ * allocator is not available, so fall back to early_get_page() and
+ * clear the page by hand. */
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+       pte_t *pte;
+       extern int mem_init_done;
+       extern void *early_get_page(void);
+
+       if (mem_init_done) {
+               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       } else {
+               pte = (pte_t *)early_get_page();
+               if (pte)
+                       clear_page(pte);
+       }
+       return pte;
+}
+
+/* Allocate one zeroed user PTE page as a struct page.  With
+ * CONFIG_HIGHPTE the page may come from highmem, hence the use of
+ * clear_highpage() rather than clear_page(). */
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       struct page *ptepage;
+
+#ifdef CONFIG_HIGHPTE
+       int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+#else
+       int flags = GFP_KERNEL | __GFP_REPEAT;
+#endif
+
+       ptepage = alloc_pages(flags, 0);
+       if (ptepage)
+               clear_highpage(ptepage);
+       return ptepage;
+}
+
+/* Free a kernel PTE page.  On SMP, hash_page_sync() is called first so
+ * no other CPU is still walking the hash table for these PTEs. */
+void pte_free_kernel(pte_t *pte)
+{
+#ifdef CONFIG_SMP
+       hash_page_sync();
+#endif
+       free_page((unsigned long)pte);
+}
+
+/* Free a user PTE page (struct page variant), with the same SMP sync. */
+void pte_free(struct page *ptepage)
+{
+#ifdef CONFIG_SMP
+       hash_page_sync();
+#endif
+       __free_page(ptepage);
+}
+
+#ifndef CONFIG_PHYS_64BIT
+/* Physical addresses fit in unsigned long: map the range uncached. */
+void __iomem *
+ioremap(phys_addr_t addr, unsigned long size)
+{
+       return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+#else /* CONFIG_PHYS_64BIT */
+/* With >32-bit physical addressing, ioremap() first widens the address
+ * via fixup_bigphys_addr() before mapping it uncached. */
+void __iomem *
+ioremap64(unsigned long long addr, unsigned long size)
+{
+       return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+
+void __iomem *
+ioremap(phys_addr_t addr, unsigned long size)
+{
+       phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
+
+       return ioremap64(addr64, size);
+}
+#endif /* CONFIG_PHYS_64BIT */
+
+/*
+ * Core 32-bit ioremap: map the physical range [addr, addr+size) into
+ * kernel virtual space with the given page flags, returning the virtual
+ * address for 'addr' (the sub-page offset is preserved).  Returns NULL
+ * on empty size, on an attempt to remap in-use RAM, or on failure.
+ */
+void __iomem *
+__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
+{
+       unsigned long v, i;
+       phys_addr_t p;
+       int err;
+
+       /*
+        * Choose an address to map it to.
+        * Once the vmalloc system is running, we use it.
+        * Before then, we use space going down from ioremap_base
+        * (ioremap_bot records where we're up to).
+        */
+       p = addr & PAGE_MASK;
+       size = PAGE_ALIGN(addr + size) - p;
+
+       /*
+        * If the address lies within the first 16 MB, assume it's in ISA
+        * memory space
+        */
+       if (p < 16*1024*1024)
+               p += _ISA_MEM_BASE;
+
+       /*
+        * Don't allow anybody to remap normal RAM that we're using.
+        * mem_init() sets high_memory so only do the check after that.
+        */
+       if (mem_init_done && (p < virt_to_phys(high_memory))) {
+               printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
+                      __builtin_return_address(0));
+               return NULL;
+       }
+
+       if (size == 0)
+               return NULL;
+
+       /*
+        * Is it already mapped?  Perhaps overlapped by a previous
+        * BAT mapping.  If the whole area is mapped then we're done,
+        * otherwise remap it since we want to keep the virt addrs for
+        * each request contiguous.
+        *
+        * We make the assumption here that if the bottom and top
+        * of the range we want are mapped then it's mapped to the
+        * same virt address (and this is contiguous).
+        *  -- Cort
+        */
+       if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
+               goto out;
+
+       if ((v = p_mapped_by_tlbcam(p)))
+               goto out;
+
+       /* Pick a virtual range: vmalloc space once the mm is up,
+        * otherwise grow down from ioremap_base. */
+       if (mem_init_done) {
+               struct vm_struct *area;
+               area = get_vm_area(size, VM_IOREMAP);
+               if (area == 0)
+                       return NULL;
+               v = (unsigned long) area->addr;
+       } else {
+               v = (ioremap_bot -= size);
+       }
+
+       /* Default to kernel pages; uncached I/O must also be guarded
+        * against speculative accesses. */
+       if ((flags & _PAGE_PRESENT) == 0)
+               flags |= _PAGE_KERNEL;
+       if (flags & _PAGE_NO_CACHE)
+               flags |= _PAGE_GUARDED;
+
+       /*
+        * Should check if it is a candidate for a BAT mapping
+        */
+
+       err = 0;
+       for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+               err = map_page(v+i, p+i, flags);
+       if (err) {
+               if (mem_init_done)
+                       vunmap((void *)v);
+               return NULL;
+       }
+
+out:
+       return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+/* Undo an ioremap().  BAT-covered mappings were never vmalloc'd, so
+ * there is nothing to release for them. */
+void iounmap(volatile void __iomem *addr)
+{
+       /*
+        * If mapped by BATs then there is nothing to do.
+        * Calling vfree() generates a benign warning.
+        */
+       if (v_mapped_by_bats((unsigned long)addr)) return;
+
+       /* NOTE(review): 'addr > high_memory' compares an __iomem pointer
+        * with a plain void *; appears to rely on both being plain kernel
+        * virtual addresses here -- confirm sparse/compiler tolerate it. */
+       if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+               vunmap((void *) (PAGE_MASK & (unsigned long)addr));
+}
+
+/* Map an I/O port range: on PPC, ports are memory-mapped at _IO_BASE,
+ * so this is pure address arithmetic with no state to track. */
+void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+       return (void __iomem *) (port + _IO_BASE);
+}
+
+/* Counterpart of ioport_map(); no resources were allocated. */
+void ioport_unmap(void __iomem *addr)
+{
+       /* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/*
+ * Install a single kernel PTE mapping va -> pa with the given flags.
+ * Returns 0 on success or -ENOMEM if a PTE page could not be allocated.
+ * Serialized by init_mm.page_table_lock.
+ */
+int
+map_page(unsigned long va, phys_addr_t pa, int flags)
+{
+       pmd_t *pd;
+       pte_t *pg;
+       int err = -ENOMEM;
+
+       spin_lock(&init_mm.page_table_lock);
+       /* Use upper 10 bits of VA to index the first level map */
+       pd = pmd_offset(pgd_offset_k(va), va);
+       /* Use middle 10 bits of VA to index the second-level map */
+       pg = pte_alloc_kernel(&init_mm, pd, va);
+       if (pg != 0) {
+               err = 0;
+               set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+               /* Evict any stale hash-table/TLB entry for this va. */
+               if (mem_init_done)
+                       flush_HPTE(0, va, pmd_val(*pd));
+       }
+       spin_unlock(&init_mm.page_table_lock);
+       return err;
+}
+
+/*
+ * Map in all of physical memory starting at KERNELBASE.
+ * mmu_mapin_ram() first covers what it can with BATs (or equivalent);
+ * the remainder of lowmem is mapped page by page, with kernel text
+ * pages getting the (read-only) _PAGE_RAM_TEXT protection.
+ */
+void __init mapin_ram(void)
+{
+       unsigned long v, p, s, f;
+
+       s = mmu_mapin_ram();
+       v = KERNELBASE + s;
+       p = PPC_MEMSTART + s;
+       for (; s < total_lowmem; s += PAGE_SIZE) {
+               if ((char *) v >= _stext && (char *) v < etext)
+                       f = _PAGE_RAM_TEXT;
+               else
+                       f = _PAGE_RAM;
+               map_page(v, p, f);
+               v += PAGE_SIZE;
+               p += PAGE_SIZE;
+       }
+}
+
+/* is x a power of 2? */
+#define is_power_of_2(x)       ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+/* is x a power of 4? */
+/* i.e. a power of 2 whose single set bit is at an odd ffs() position */
+#define is_power_of_4(x)       ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
+
+/*
+ * Set up a mapping for a block of I/O.
+ * virt, phys, size must all be page-aligned.
+ * This should only be called before ioremap is called.
+ */
+void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+                            unsigned int size, int flags)
+{
+       int i;
+
+       /* Keep later ioremap() allocations below this early mapping. */
+       if (virt > KERNELBASE && virt < ioremap_bot)
+               ioremap_bot = ioremap_base = virt;
+
+#ifdef HAVE_BATS
+       /*
+        * Use a BAT for this if possible...
+        */
+       if (io_bat_index < 2 && is_power_of_2(size)
+           && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+               setbat(io_bat_index, virt, phys, size, flags);
+               ++io_bat_index;
+               return;
+       }
+#endif /* HAVE_BATS */
+
+#ifdef HAVE_TLBCAM
+       /*
+        * Use a CAM for this if possible...
+        */
+       if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
+           && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+               settlbcam(tlbcam_index, virt, phys, size, flags, 0);
+               ++tlbcam_index;
+               return;
+       }
+#endif /* HAVE_TLBCAM */
+
+       /* No BATs available, put it in the page tables. */
+       for (i = 0; i < size; i += PAGE_SIZE)
+               map_page(virt + i, phys + i, flags);
+}
+
+/* Scan the real Linux page tables and return a PTE pointer for
+ * a virtual address in a context.
+ * Returns true (1) if PTE was found, zero otherwise.  The pointer to
+ * the PTE pointer is unmodified if PTE is not found.
+ */
+int
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+{
+        pgd_t  *pgd;
+        pmd_t  *pmd;
+        pte_t  *pte;
+        int     retval = 0;
+
+        pgd = pgd_offset(mm, addr & PAGE_MASK);
+        if (pgd) {
+                pmd = pmd_offset(pgd, addr & PAGE_MASK);
+                if (pmd_present(*pmd)) {
+                        /* pte_offset_map may kmap a highmem PTE page */
+                        pte = pte_offset_map(pmd, addr & PAGE_MASK);
+                        if (pte) {
+                               retval = 1;
+                               *ptep = pte;
+                               /* XXX caller needs to do pte_unmap, yuck */
+                        }
+                }
+        }
+        return(retval);
+}
+
+/* Find physical address for this virtual address.  Normally used by
+ * I/O functions, but anyone can call it.
+ * Returns 0 when no BAT or page-table translation is found.
+ */
+unsigned long iopa(unsigned long addr)
+{
+       unsigned long pa;
+
+       /* I don't know why this won't work on PMacs or CHRP.  It
+        * appears there is some bug, or there is some implicit
+        * mapping done not properly represented by BATs or in page
+        * tables.......I am actively working on resolving this, but
+        * can't hold up other stuff.  -- Dan
+        */
+       pte_t *pte;
+       struct mm_struct *mm;
+
+       /* Check the BATs */
+       pa = v_mapped_by_bats(addr);
+       if (pa)
+               return pa;
+
+       /* Allow mapping of user addresses (within the thread)
+        * for DMA if necessary.
+        */
+       if (addr < TASK_SIZE)
+               mm = current->mm;
+       else
+               mm = &init_mm;
+
+       pa = 0;
+       if (get_pteptr(mm, addr, &pte)) {
+               /* Combine the page frame with the sub-page offset. */
+               pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
+               pte_unmap(pte);
+       }
+
+       return(pa);
+}
+
+/* This will find the virtual address for a physical one....
+ * Swiped from APUS, could be dangerous :-).
+ * This is only a placeholder until I really find a way to make this
+ * work.  -- Dan
+ */
+unsigned long
+mm_ptov (unsigned long paddr)
+{
+       unsigned long ret;
+#if 0
+       /* Dead APUS-era code kept for reference; the live path below
+        * just assumes the linear kernel mapping. */
+       if (paddr < 16*1024*1024)
+               ret = ZTWO_VADDR(paddr);
+       else {
+               int i;
+
+               for (i = 0; i < kmap_chunk_count;){
+                       unsigned long phys = kmap_chunks[i++];
+                       unsigned long size = kmap_chunks[i++];
+                       unsigned long virt = kmap_chunks[i++];
+                       if (paddr >= phys
+                           && paddr < (phys + size)){
+                               ret = virt + paddr - phys;
+                               goto exit;
+                       }
+               }
+       
+               ret = (unsigned long) __va(paddr);
+       }
+exit:
+#ifdef DEBUGPV
+       printk ("PTOV(%lx)=%lx\n", paddr, ret);
+#endif
+#else
+       ret = (unsigned long)paddr + KERNELBASE;
+#endif
+       return ret;
+}
+
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
new file mode 100644 (file)
index 0000000..484d24f
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ *  This file contains ioremap and related functions for 64-bit machines.
+ *
+ *  Derived from arch/ppc64/mm/init.c
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/rtas.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/tlb.h>
+#include <asm/eeh.h>
+#include <asm/processor.h>
+#include <asm/mmzone.h>
+#include <asm/cputable.h>
+#include <asm/ppcdebug.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
+#include <asm/vdso.h>
+#include <asm/imalloc.h>
+
+unsigned long ioremap_bot = IMALLOC_BASE;
+static unsigned long phbs_io_bot = PHBS_IO_BASE;
+
+#ifdef CONFIG_PPC_ISERIES
+
+/* On iSeries the hypervisor provides the I/O mappings, so the ioremap
+ * family degenerates to identity/no-op functions. */
+void __iomem *ioremap(unsigned long addr, unsigned long size)
+{
+       return (void __iomem *)addr;
+}
+
+/* NOTE(review): 'extern' on a function definition is unusual (though
+ * harmless); presumably a leftover from the declaration. */
+extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
+                      unsigned long flags)
+{
+       return (void __iomem *)addr;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+       return;
+}
+
+#else
+
+/*
+ * map_io_page currently only called by __ioremap
+ * map_io_page adds an entry to the ioremap page table
+ * and adds an entry to the HPT, possibly bolting it
+ *
+ * Returns 0 on success or -ENOMEM if a page-table level could not be
+ * allocated.  Serialized by init_mm.page_table_lock once the mm is up.
+ */
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
+{
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       unsigned long vsid;
+
+       if (mem_init_done) {
+               int err = -ENOMEM;
+
+               /*
+                * Walk/allocate the kernel page table for 'ea' and
+                * install the PTE.  All failure paths must go through
+                * out_unlock: returning with init_mm.page_table_lock
+                * held would deadlock the next caller.
+                */
+               spin_lock(&init_mm.page_table_lock);
+               pgdp = pgd_offset_k(ea);
+               pudp = pud_alloc(&init_mm, pgdp, ea);
+               if (!pudp)
+                       goto out_unlock;
+               pmdp = pmd_alloc(&init_mm, pudp, ea);
+               if (!pmdp)
+                       goto out_unlock;
+               ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
+               if (!ptep)
+                       goto out_unlock;
+               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+                                                         __pgprot(flags)));
+               err = 0;
+out_unlock:
+               spin_unlock(&init_mm.page_table_lock);
+               return err;
+       } else {
+               unsigned long va, vpn, hash, hpteg;
+
+               /*
+                * If the mm subsystem is not fully up, we cannot create a
+                * linux page table entry for this mapping.  Simply bolt an
+                * entry in the hardware page table.
+                */
+               vsid = get_kernel_vsid(ea);
+               va = (vsid << 28) | (ea & 0xFFFFFFF);
+               vpn = va >> PAGE_SHIFT;
+
+               hash = hpt_hash(vpn, 0);
+
+               hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+               /* Panic if a pte group is full */
+               if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
+                                      HPTE_V_BOLTED,
+                                      _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
+                   == -1) {
+                       panic("map_io_page: could not insert mapping");
+               }
+       }
+       return 0;
+}
+
+
+/* Map the physical range [pa, pa+size) at effective address ea, page
+ * by page, and return the __iomem pointer for 'addr' (sub-page offset
+ * preserved).  Returns NULL if any page fails to map. */
+static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+                           unsigned long ea, unsigned long size,
+                           unsigned long flags)
+{
+       unsigned long i;
+
+       /* Default to normal kernel page protection when none given. */
+       if ((flags & _PAGE_PRESENT) == 0)
+               flags |= pgprot_val(PAGE_KERNEL);
+
+       for (i = 0; i < size; i += PAGE_SIZE)
+               if (map_io_page(ea+i, pa+i, flags))
+                       return NULL;
+
+       return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+}
+
+
+/* Map an I/O range uncached and guarded (the usual device mapping). */
+void __iomem *
+ioremap(unsigned long addr, unsigned long size)
+{
+       return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+/*
+ * Core 64-bit ioremap: choose an effective address (imalloc once the
+ * mm is up, otherwise linearly from ioremap_bot) and map the range.
+ * Returns NULL on empty size or failure.
+ */
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+                        unsigned long flags)
+{
+       unsigned long pa, ea;
+       void __iomem *ret;
+
+       /*
+        * Choose an address to map it to.
+        * Once the imalloc system is running, we use it.
+        * Before that, we map using addresses going
+        * up from ioremap_bot.  imalloc will use
+        * the addresses from ioremap_bot through
+        * IMALLOC_END
+        * 
+        */
+       pa = addr & PAGE_MASK;
+       size = PAGE_ALIGN(addr + size) - pa;
+
+       if (size == 0)
+               return NULL;
+
+       if (mem_init_done) {
+               struct vm_struct *area;
+               area = im_get_free_area(size);
+               if (area == NULL)
+                       return NULL;
+               ea = (unsigned long)(area->addr);
+               ret = __ioremap_com(addr, pa, ea, size, flags);
+               if (!ret)
+                       im_free(area->addr);
+       } else {
+               ea = ioremap_bot;
+               ret = __ioremap_com(addr, pa, ea, size, flags);
+               /* Only consume the address space on success. */
+               if (ret)
+                       ioremap_bot += size;
+       }
+       return ret;
+}
+
+#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
+
+/*
+ * Map physical range pa at the caller-chosen effective address ea.
+ * All of pa, ea, size must be page-aligned.  Returns 0 on success and
+ * 1 on any failure (note: 1, not a negative errno).
+ */
+int __ioremap_explicit(unsigned long pa, unsigned long ea,
+                      unsigned long size, unsigned long flags)
+{
+       struct vm_struct *area;
+       void __iomem *ret;
+       
+       /* For now, require page-aligned values for pa, ea, and size */
+       if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
+           !IS_PAGE_ALIGNED(size)) {
+               printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
+               return 1;
+       }
+       
+       if (!mem_init_done) {
+               /* Two things to consider in this case:
+                * 1) No records will be kept (imalloc, etc) that the region
+                *    has been remapped
+                * 2) It won't be easy to iounmap() the region later (because
+                *    of 1)
+                */
+               ;
+       } else {
+               /* Reserve the region in imalloc so later ioremaps and
+                * iounmap_explicit() can find it. */
+               area = im_get_area(ea, size,
+                       IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
+               if (area == NULL) {
+                       /* Expected when PHB-dlpar is in play */
+                       return 1;
+               }
+               if (ea != (unsigned long) area->addr) {
+                       printk(KERN_ERR "unexpected addr return from "
+                              "im_get_area\n");
+                       return 1;
+               }
+       }
+       
+       ret = __ioremap_com(pa, pa, ea, size, flags);
+       if (ret == NULL) {
+               printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+               return 1;
+       }
+       if (ret != (void *) ea) {
+               printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+/*  
+ * Unmap an IO region and remove it from imalloc'd list.
+ * Access to IO memory should be serialized by driver.
+ * This code is modeled after vmalloc code - unmap_vm_area()
+ *
+ * XXX what about calls before mem_init_done (ie python_countermeasures())
+ */
+void iounmap(volatile void __iomem *token)
+{
+       void *addr;
+
+       /* Early mappings were never recorded in imalloc (see XXX above),
+        * so there is nothing we can free for them. */
+       if (!mem_init_done)
+               return;
+       
+       addr = (void *) ((unsigned long __force) token & PAGE_MASK);
+
+       im_free(addr);
+}
+
+/* Unmap every imalloc region fully contained in [addr, addr+size).
+ * Returns 0 if at least one subset region was found and unmapped,
+ * 1 if none existed. */
+static int iounmap_subset_regions(unsigned long addr, unsigned long size)
+{
+       struct vm_struct *area;
+
+       /* Check whether subsets of this region exist */
+       area = im_get_area(addr, size, IM_REGION_SUPERSET);
+       if (area == NULL)
+               return 1;
+
+       /* iounmap() removes the region, so re-query until none remain. */
+       while (area) {
+               iounmap((void __iomem *) area->addr);
+               area = im_get_area(addr, size,
+                               IM_REGION_SUPERSET);
+       }
+
+       return 0;
+}
+
+/* Unmap a region previously mapped with __ioremap_explicit().  Handles
+ * both an exact/subset imalloc region and the case where only smaller
+ * subset regions exist.  Returns 0 on success, 1 on failure. */
+int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+{
+       struct vm_struct *area;
+       unsigned long addr;
+       int rc;
+       
+       addr = (unsigned long __force) start & PAGE_MASK;
+
+       /* Verify that the region either exists or is a subset of an existing
+        * region.  In the latter case, split the parent region to create 
+        * the exact region 
+        */
+       area = im_get_area(addr, size, 
+                           IM_REGION_EXISTS | IM_REGION_SUBSET);
+       if (area == NULL) {
+               /* Determine whether subset regions exist.  If so, unmap */
+               rc = iounmap_subset_regions(addr, size);
+               if (rc) {
+                       printk(KERN_ERR
+                              "%s() cannot unmap nonexistent range 0x%lx\n",
+                               __FUNCTION__, addr);
+                       return 1;
+               }
+       } else {
+               iounmap((void __iomem *) area->addr);
+       }
+       /*
+        * FIXME! This can't be right:
+       iounmap(area->addr);
+        * Maybe it should be "iounmap(area);"
+        */
+       return 0;
+}
+
+#endif
+
+EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+
+/* Carve 'size' bytes of virtual space for a PHB out of the fixed
+ * window [PHBS_IO_BASE, IMALLOC_BASE); panics on exhaustion.  Simple
+ * bump allocator -- reservations are never returned. */
+void __iomem * reserve_phb_iospace(unsigned long size)
+{
+       void __iomem *virt_addr;
+               
+       if (phbs_io_bot >= IMALLOC_BASE) 
+               panic("reserve_phb_iospace(): phb io space overflow\n");
+                       
+       virt_addr = (void __iomem *) phbs_io_bot;
+       phbs_io_bot += size;
+
+       return virt_addr;
+}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
new file mode 100644 (file)
index 0000000..cef9e83
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification.  This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <asm/prom.h>
+#include <asm/mmu.h>
+#include <asm/machdep.h>
+#include <asm/lmb.h>
+
+#include "mmu_decl.h"
+
+/* Hash table location and geometry, set up by MMU_init_hw() below. */
+PTE *Hash, *Hash_end;
+unsigned long Hash_size, Hash_mask;
+/* Value to be loaded into the SDR1 SPR (hash base | size bits). */
+unsigned long _SDR1;
+
+union ubat {                   /* BAT register values to be loaded */
+       BAT     bat;
+#ifdef CONFIG_PPC64BRIDGE
+       u64     word[2];
+#else
+       u32     word[2];
+#endif
+} BATS[4][2];                  /* 4 pairs of IBAT, DBAT */
+
+struct batrange {              /* stores address ranges mapped by BATs */
+       unsigned long start;
+       unsigned long limit;
+       unsigned long phys;
+} bat_addrs[4];
+
+/*
+ * Return PA for this VA if it is mapped by a BAT, or 0
+ */
+unsigned long v_mapped_by_bats(unsigned long va)
+{
+       int b;
+       /* Linear scan of the 4 recorded BAT ranges (see bat_addrs[]). */
+       for (b = 0; b < 4; ++b)
+               if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
+                       return bat_addrs[b].phys + (va - bat_addrs[b].start);
+       return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_mapped_by_bats(unsigned long pa)
+{
+       int b;
+       /* Inverse lookup of v_mapped_by_bats(): compare pa against each
+        * BAT's physical window [phys, phys + (limit - start)). */
+       for (b = 0; b < 4; ++b)
+               if (pa >= bat_addrs[b].phys
+                   && pa < (bat_addrs[b].limit-bat_addrs[b].start)
+                             +bat_addrs[b].phys)
+                       return bat_addrs[b].start+(pa-bat_addrs[b].phys);
+       return 0;
+}
+
+/* Cover as much of lowmem as possible with BAT2 (and BAT3 if needed),
+ * starting at KERNELBASE.  Returns the number of bytes mapped; the
+ * caller (mapin_ram) maps the rest with ordinary page tables. */
+unsigned long __init mmu_mapin_ram(void)
+{
+#ifdef CONFIG_POWER4
+       return 0;
+#else
+       unsigned long tot, bl, done;
+       unsigned long max_size = (256<<20);
+       unsigned long align;
+
+       if (__map_without_bats)
+               return 0;
+
+       /* Set up BAT2 and if necessary BAT3 to cover RAM. */
+
+       /* Make sure we don't map a block larger than the
+          smallest alignment of the physical address. */
+       /* alignment of PPC_MEMSTART */
+       align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
+       /* set BAT block size to MIN(max_size, align) */
+       if (align && align < max_size)
+               max_size = align;
+
+       /* Largest power-of-2 block not exceeding half of lowmem. */
+       tot = total_lowmem;
+       for (bl = 128<<10; bl < max_size; bl <<= 1) {
+               if (bl * 2 > tot)
+                       break;
+       }
+
+       setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
+       done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
+       if ((done < tot) && !bat_addrs[3].limit) {
+               /* use BAT3 to cover a bit more */
+               tot -= done;
+               for (bl = 128<<10; bl < max_size; bl <<= 1)
+                       if (bl * 2 > tot)
+                               break;
+               setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
+               done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
+       }
+
+       return done;
+#endif
+}
+
+/*
+ * Set up one of the I/D BAT (block address translation) register pairs.
+ * The parameters are not checked; in particular size must be a power
+ * of 2 between 128k and 256M.
+ * The values are staged in BATS[][] (loaded into the SPRs elsewhere)
+ * and the covered range is recorded in bat_addrs[] for the lookup
+ * helpers above.
+ */
+void __init setbat(int index, unsigned long virt, unsigned long phys,
+                  unsigned int size, int flags)
+{
+       unsigned int bl;
+       int wimgxpp;
+       union ubat *bat = BATS[index];
+
+       if (((flags & _PAGE_NO_CACHE) == 0) &&
+           cpu_has_feature(CPU_FTR_NEED_COHERENT))
+               flags |= _PAGE_COHERENT;
+
+       /* Encode the block length field from the size (128kB units). */
+       bl = (size >> 17) - 1;
+       if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
+               /* 603, 604, etc. */
+               /* Do DBAT first */
+               wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+                                  | _PAGE_COHERENT | _PAGE_GUARDED);
+               wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+               bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+               bat[1].word[1] = phys | wimgxpp;
+#ifndef CONFIG_KGDB /* want user access for breakpoints */
+               if (flags & _PAGE_USER)
+#endif
+                       bat[1].bat.batu.vp = 1;
+               if (flags & _PAGE_GUARDED) {
+                       /* G bit must be zero in IBATs */
+                       bat[0].word[0] = bat[0].word[1] = 0;
+               } else {
+                       /* make IBAT same as DBAT */
+                       bat[0] = bat[1];
+               }
+       } else {
+               /* 601 cpu */
+               if (bl > BL_8M)
+                       bl = BL_8M;
+               wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+                                  | _PAGE_COHERENT);
+               wimgxpp |= (flags & _PAGE_RW)?
+                       ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
+               bat->word[0] = virt | wimgxpp | 4;      /* Ks=0, Ku=1 */
+               bat->word[1] = phys | bl | 0x40;        /* V=1 */
+       }
+
+       bat_addrs[index].start = virt;
+       bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
+       bat_addrs[index].phys = phys;
+}
+
+/*
+ * Initialize the hash table and patch the instructions in hashtable.S.
+ * Sizes the table from total_memory, allocates it from lmb, computes
+ * _SDR1, and patches the hash/flush assembly with the table's address,
+ * mask and shift constants.
+ */
+void __init MMU_init_hw(void)
+{
+       unsigned int hmask, mb, mb2;
+       unsigned int n_hpteg, lg_n_hpteg;
+
+       extern unsigned int hash_page_patch_A[];
+       extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
+       extern unsigned int hash_page[];
+       extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
+
+       if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
+               /*
+                * Put a blr (procedure return) instruction at the
+                * start of hash_page, since we can still get DSI
+                * exceptions on a 603.
+                */
+               hash_page[0] = 0x4e800020;
+               flush_icache_range((unsigned long) &hash_page[0],
+                                  (unsigned long) &hash_page[1]);
+               return;
+       }
+
+       if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
+
+#ifdef CONFIG_PPC64BRIDGE
+#define LG_HPTEG_SIZE  7               /* 128 bytes per HPTEG */
+#define SDR1_LOW_BITS  (lg_n_hpteg - 11)
+#define MIN_N_HPTEG    2048            /* min 256kB hash table */
+#else
+#define LG_HPTEG_SIZE  6               /* 64 bytes per HPTEG */
+#define SDR1_LOW_BITS  ((n_hpteg - 1) >> 10)
+#define MIN_N_HPTEG    1024            /* min 64kB hash table */
+#endif
+
+       /*
+        * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
+        * This is less than the recommended amount, but then
+        * Linux ain't AIX.
+        */
+       n_hpteg = total_memory / (PAGE_SIZE * 8);
+       if (n_hpteg < MIN_N_HPTEG)
+               n_hpteg = MIN_N_HPTEG;
+       lg_n_hpteg = __ilog2(n_hpteg);
+       if (n_hpteg & (n_hpteg - 1)) {
+               ++lg_n_hpteg;           /* round up if not power of 2 */
+               n_hpteg = 1 << lg_n_hpteg;
+       }
+       Hash_size = n_hpteg << LG_HPTEG_SIZE;
+
+       /*
+        * Find some memory for the hash table.
+        */
+       if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
+       /* The table must be naturally aligned (aligned to its own size). */
+       Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+                                  __initial_memory_limit));
+       cacheable_memzero(Hash, Hash_size);
+       _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
+
+       Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+
+       printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
+              total_memory >> 20, Hash_size >> 10, Hash);
+
+
+       /*
+        * Patch up the instructions in hashtable.S:create_hpte
+        */
+       if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
+       Hash_mask = n_hpteg - 1;
+       hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+       mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
+       if (lg_n_hpteg > 16)
+               mb2 = 16 - LG_HPTEG_SIZE;
+
+       hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
+               | ((unsigned int)(Hash) >> 16);
+       hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
+       hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
+       hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
+       hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
+
+       /*
+        * Ensure that the locations we've patched have been written
+        * out from the data cache and invalidated in the instruction
+        * cache, on those machines with split caches.
+        */
+       flush_icache_range((unsigned long) &hash_page_patch_A[0],
+                          (unsigned long) &hash_page_patch_C[1]);
+
+       /*
+        * Patch up the instructions in hashtable.S:flush_hash_page
+        */
+       flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
+               | ((unsigned int)(Hash) >> 16);
+       flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
+       flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
+       flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
+       flush_icache_range((unsigned long) &flush_hash_patch_A[0],
+                          (unsigned long) &flush_hash_patch_B[1]);
+
+       if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
+}
similarity index 100%
rename from arch/ppc64/mm/slb.c
rename to arch/powerpc/mm/slb.c
similarity index 100%
rename from arch/ppc64/mm/stab.c
rename to arch/powerpc/mm/stab.c
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
new file mode 100644 (file)
index 0000000..6c3dc3c
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU uses a hash table to store virtual to
+ * physical translations, these routines flush entries from the
+ * hash table also.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Called when unmapping pages to flush entries from the TLB/hash table.
+ */
+void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+       unsigned long ptephys;
+
+       if (Hash != 0) {
+               ptephys = __pa(ptep) & PAGE_MASK;
+               flush_hash_pages(mm->context, addr, ptephys, 1);
+       }
+}
+
+/*
+ * Called by ptep_set_access_flags, must flush on CPUs for which the
+ * DSI handler can't just "fixup" the TLB on a write fault
+ */
+void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
+{
+       if (Hash != 0)
+               return;
+       _tlbie(addr);
+}
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+void tlb_flush(struct mmu_gather *tlb)
+{
+       if (Hash == 0) {
+               /*
+                * 603 needs to flush the whole TLB here since
+                * it doesn't use a hash table.
+                */
+               _tlbia();
+       }
+}
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ * since the hardware hash table functions as an extension of the
+ * tlb as far as the linux tables are concerned, flush it too.
+ *    -- Cort
+ */
+
+/*
+ * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
+ * the cache operations on the bus.  Hence we need to use an IPI
+ * to get the other CPU(s) to invalidate their TLBs.
+ */
+#ifdef CONFIG_SMP_750
+#define FINISH_FLUSH   smp_send_tlb_invalidate(0)
+#else
+#define FINISH_FLUSH   do { } while (0)
+#endif
+
+static void flush_range(struct mm_struct *mm, unsigned long start,
+                       unsigned long end)
+{
+       pmd_t *pmd;
+       unsigned long pmd_end;
+       int count;
+       unsigned int ctx = mm->context;
+
+       if (Hash == 0) {
+               _tlbia();
+               return;
+       }
+       start &= PAGE_MASK;
+       if (start >= end)
+               return;
+       end = (end - 1) | ~PAGE_MASK;
+       pmd = pmd_offset(pgd_offset(mm, start), start);
+       for (;;) {
+               pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
+               if (pmd_end > end)
+                       pmd_end = end;
+               if (!pmd_none(*pmd)) {
+                       count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
+                       flush_hash_pages(ctx, start, pmd_val(*pmd), count);
+               }
+               if (pmd_end == end)
+                       break;
+               start = pmd_end + 1;
+               ++pmd;
+       }
+}
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       flush_range(&init_mm, start, end);
+       FINISH_FLUSH;
+}
+
+/*
+ * Flush all the (user) entries for the address space described by mm.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       struct vm_area_struct *mp;
+
+       if (Hash == 0) {
+               _tlbia();
+               return;
+       }
+
+       for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
+               flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
+       FINISH_FLUSH;
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+       struct mm_struct *mm;
+       pmd_t *pmd;
+
+       if (Hash == 0) {
+               _tlbie(vmaddr);
+               return;
+       }
+       mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
+       pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
+       if (!pmd_none(*pmd))
+               flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+       FINISH_FLUSH;
+}
+
+/*
+ * For each address in the range, find the pte for the address
+ * and check _PAGE_HASHPTE bit; if it is set, find and destroy
+ * the corresponding HPTE.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                    unsigned long end)
+{
+       flush_range(vma->vm_mm, start, end);
+       FINISH_FLUSH;
+}
similarity index 92%
rename from arch/ppc64/mm/tlb.c
rename to arch/powerpc/mm/tlb_64.c
index 21fbffb..09ab81a 100644 (file)
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 void hpte_update(struct mm_struct *mm, unsigned long addr,
                 unsigned long pte, int wrprot)
 {
-       int i;
-       unsigned long context = 0;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       unsigned long vsid;
+       int i;
 
-       if (REGION_ID(addr) == USER_REGION_ID)
-               context = mm->context.id;
        i = batch->index;
 
        /*
@@ -143,19 +141,21 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         */
-       if (i != 0 && (context != batch->context ||
-                      batch->large != pte_huge(pte))) {
+       if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
                flush_tlb_pending();
                i = 0;
        }
-
        if (i == 0) {
-               batch->context = context;
                batch->mm = mm;
                batch->large = pte_huge(pte);
        }
+       if (addr < KERNELBASE) {
+               vsid = get_vsid(mm->context.id, addr);
+               WARN_ON(vsid == 0);
+       } else
+               vsid = get_kernel_vsid(addr);
+       batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
        batch->pte[i] = __pte(pte);
-       batch->addr[i] = addr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                flush_tlb_pending();
@@ -177,10 +177,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
                local = 1;
 
        if (i == 1)
-               flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
-                               local);
+               flush_hash_page(batch->vaddr[0], batch->pte[0], local);
        else
-               flush_hash_range(batch->context, i, local);
+               flush_hash_range(i, local);
        batch->index = 0;
        put_cpu();
 }
similarity index 69%
rename from arch/ppc/oprofile/Makefile
rename to arch/powerpc/oprofile/Makefile
index e2218d3..0782d0c 100644 (file)
@@ -7,8 +7,5 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
                timer_int.o )
 
 oprofile-y := $(DRIVER_OBJS) common.o
-
-ifeq ($(CONFIG_FSL_BOOKE),y)
-       oprofile-y += op_model_fsl_booke.o
-endif
-
+oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
+oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
similarity index 62%
rename from arch/ppc64/oprofile/common.c
rename to arch/powerpc/oprofile/common.c
index e5f5727..af2c05d 100644 (file)
@@ -1,5 +1,9 @@
 /*
+ * PPC 64 oprofile support:
  * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ * PPC 32 oprofile support: (based on PPC 64 support)
+ * Copyright (C) Freescale Semiconductor, Inc 2004
+ *     Author: Andy Fleming
  *
  * Based on alpha version.
  *
@@ -10,6 +14,9 @@
  */
 
 #include <linux/oprofile.h>
+#ifndef __powerpc64__
+#include <linux/slab.h>
+#endif /* ! __powerpc64__ */
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/errno.h>
 #include <asm/cputable.h>
 #include <asm/oprofile_impl.h>
 
-static struct op_ppc64_model *model;
+static struct op_powerpc_model *model;
 
 static struct op_counter_config ctr[OP_MAX_COUNTER];
 static struct op_system_config sys;
 
+#ifndef __powerpc64__
+static char *cpu_type;
+#endif /* ! __powerpc64__ */
+
 static void op_handle_interrupt(struct pt_regs *regs)
 {
        model->handle_interrupt(regs, ctr);
 }
 
-static int op_ppc64_setup(void)
+static int op_powerpc_setup(void)
 {
        int err;
 
@@ -42,41 +53,49 @@ static int op_ppc64_setup(void)
        model->reg_setup(ctr, &sys, model->num_counters);
 
        /* Configure the registers on all cpus.  */
+#ifdef __powerpc64__
        on_each_cpu(model->cpu_setup, NULL, 0, 1);
+#else /* __powerpc64__ */
+#if 0
+       /* FIXME: Make multi-cpu work */
+       on_each_cpu(model->reg_setup, NULL, 0, 1);
+#endif
+#endif /* __powerpc64__ */
 
        return 0;
 }
 
-static void op_ppc64_shutdown(void)
+static void op_powerpc_shutdown(void)
 {
        release_pmc_hardware();
 }
 
-static void op_ppc64_cpu_start(void *dummy)
+static void op_powerpc_cpu_start(void *dummy)
 {
        model->start(ctr);
 }
 
-static int op_ppc64_start(void)
+static int op_powerpc_start(void)
 {
-       on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1);
+       on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
        return 0;
 }
 
-static inline void op_ppc64_cpu_stop(void *dummy)
+static inline void op_powerpc_cpu_stop(void *dummy)
 {
        model->stop();
 }
 
-static void op_ppc64_stop(void)
+static void op_powerpc_stop(void)
 {
-       on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1);
+       on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
 }
 
-static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
+static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 {
        int i;
 
+#ifdef __powerpc64__
        /*
         * There is one mmcr0, mmcr1 and mmcra for setting the events for
         * all of the counters.
@@ -84,6 +103,7 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
        oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
        oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
        oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+#endif /* __powerpc64__ */
 
        for (i = 0; i < model->num_counters; ++i) {
                struct dentry *dir;
@@ -95,44 +115,70 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
                oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+#ifdef __powerpc64__
                /*
                 * We dont support per counter user/kernel selection, but
                 * we leave the entries because userspace expects them
                 */
+#endif /* __powerpc64__ */
                oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+
+#ifndef __powerpc64__
+               /* FIXME: Not sure if this is used */
+#endif /* ! __powerpc64__ */
                oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
        }
 
        oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
        oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
+#ifdef __powerpc64__
        oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
                                &sys.backtrace_spinlocks);
+#endif /* __powerpc64__ */
 
        /* Default to tracing both kernel and user */
        sys.enable_kernel = 1;
        sys.enable_user = 1;
-
+#ifdef __powerpc64__
        /* Turn on backtracing through spinlocks by default */
        sys.backtrace_spinlocks = 1;
+#endif /* __powerpc64__ */
 
        return 0;
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
+#ifndef __powerpc64__
+#ifdef CONFIG_FSL_BOOKE
+       model = &op_model_fsl_booke;
+#else
+       return -ENODEV;
+#endif
+
+       cpu_type = kmalloc(32, GFP_KERNEL);
+       if (NULL == cpu_type)
+               return -ENOMEM;
+
+       sprintf(cpu_type, "ppc/%s", cur_cpu_spec->cpu_name);
+
+       model->num_counters = cur_cpu_spec->num_pmcs;
+
+       ops->cpu_type = cpu_type;
+#else /* __powerpc64__ */
        if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
                return -ENODEV;
-
        model = cur_cpu_spec->oprofile_model;
        model->num_counters = cur_cpu_spec->num_pmcs;
 
        ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
-       ops->create_files = op_ppc64_create_files;
-       ops->setup = op_ppc64_setup;
-       ops->shutdown = op_ppc64_shutdown;
-       ops->start = op_ppc64_start;
-       ops->stop = op_ppc64_stop;
+#endif /* __powerpc64__ */
+       ops->create_files = op_powerpc_create_files;
+       ops->setup = op_powerpc_setup;
+       ops->shutdown = op_powerpc_shutdown;
+       ops->start = op_powerpc_start;
+       ops->stop = op_powerpc_stop;
 
        printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
               ops->cpu_type);
@@ -142,4 +188,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
 void oprofile_arch_exit(void)
 {
+#ifndef __powerpc64__
+       kfree(cpu_type);
+       cpu_type = NULL;
+#endif /* ! __powerpc64__ */
 }
similarity index 97%
rename from arch/ppc/oprofile/op_model_fsl_booke.c
rename to arch/powerpc/oprofile/op_model_fsl_booke.c
index fc9c859..86124a9 100644 (file)
@@ -24,9 +24,8 @@
 #include <asm/cputable.h>
 #include <asm/reg_booke.h>
 #include <asm/page.h>
-#include <asm/perfmon.h>
-
-#include "op_impl.h"
+#include <asm/pmc.h>
+#include <asm/oprofile_impl.h>
 
 static unsigned long reset_value[OP_MAX_COUNTER];
 
@@ -176,7 +175,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
        pmc_start_ctrs(1);
 }
 
-struct op_ppc32_model op_model_fsl_booke = {
+struct op_powerpc_model op_model_fsl_booke = {
        .reg_setup              = fsl_booke_reg_setup,
        .start                  = fsl_booke_start,
        .stop                   = fsl_booke_stop,
similarity index 99%
rename from arch/ppc64/oprofile/op_model_power4.c
rename to arch/powerpc/oprofile/op_model_power4.c
index 32b2bb5..8864493 100644 (file)
@@ -300,7 +300,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
        mtspr(SPRN_MMCR0, mmcr0);
 }
 
-struct op_ppc64_model op_model_power4 = {
+struct op_powerpc_model op_model_power4 = {
        .reg_setup              = power4_reg_setup,
        .cpu_setup              = power4_cpu_setup,
        .start                  = power4_start,
similarity index 99%
rename from arch/ppc64/oprofile/op_model_rs64.c
rename to arch/powerpc/oprofile/op_model_rs64.c
index 08c5b33..e010b85 100644 (file)
@@ -209,7 +209,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
        mtspr(SPRN_MMCR0, mmcr0);
 }
 
-struct op_ppc64_model op_model_rs64 = {
+struct op_powerpc_model op_model_rs64 = {
        .reg_setup              = rs64_reg_setup,
        .cpu_setup              = rs64_cpu_setup,
        .start                  = rs64_start,
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
new file mode 100644 (file)
index 0000000..ed39d6a
--- /dev/null
@@ -0,0 +1,280 @@
+config 4xx
+       bool
+       depends on 40x || 44x
+       default y
+
+config WANT_EARLY_SERIAL
+       bool
+       select SERIAL_8250
+       default n
+
+menu "AMCC 4xx options"
+       depends on 4xx
+
+choice
+       prompt "Machine Type"
+       depends on 40x
+       default WALNUT
+
+config BUBINGA
+       bool "Bubinga"
+       select WANT_EARLY_SERIAL
+       help
+         This option enables support for the IBM 405EP evaluation board.
+
+config CPCI405
+       bool "CPCI405"
+       help
+         This option enables support for the CPCI405 board.
+
+config EP405
+       bool "EP405/EP405PC"
+       help
+         This option enables support for the EP405/EP405PC boards.
+
+config REDWOOD_5
+       bool "Redwood-5"
+       help
+         This option enables support for the IBM STB04 evaluation board.
+
+config REDWOOD_6
+       bool "Redwood-6"
+       help
+         This option enables support for the IBM STBx25xx evaluation board.
+
+config SYCAMORE
+       bool "Sycamore"
+       help
+         This option enables support for the IBM PPC405GPr evaluation board.
+
+config WALNUT
+       bool "Walnut"
+       help
+         This option enables support for the IBM PPC405GP evaluation board.
+
+config XILINX_ML300
+       bool "Xilinx-ML300"
+       help
+         This option enables support for the Xilinx ML300 evaluation board.
+
+endchoice
+
+choice
+       prompt "Machine Type"
+       depends on 44x
+       default EBONY
+
+config BAMBOO
+       bool "Bamboo"
+       select WANT_EARLY_SERIAL
+       help
+         This option enables support for the IBM PPC440EP evaluation board.
+
+config EBONY
+       bool "Ebony"
+       select WANT_EARLY_SERIAL
+       help
+         This option enables support for the IBM PPC440GP evaluation board.
+
+config LUAN
+       bool "Luan"
+       select WANT_EARLY_SERIAL
+       help
+         This option enables support for the IBM PPC440SP evaluation board.
+
+config OCOTEA
+       bool "Ocotea"
+       select WANT_EARLY_SERIAL
+       help
+         This option enables support for the IBM PPC440GX evaluation board.
+
+endchoice
+
+config EP405PC
+       bool "EP405PC Support"
+       depends on EP405
+
+
+# It's often necessary to know the specific 4xx processor type.
+# Fortunately, it is implied (so far) from the board type, so we
+# don't need to ask more redundant questions.
+config NP405H
+       bool
+       depends on ASH
+       default y
+
+config 440EP
+       bool
+       depends on BAMBOO
+       select PPC_FPU
+       default y
+
+config 440GP
+       bool
+       depends on EBONY
+       default y
+
+config 440GX
+       bool
+       depends on OCOTEA
+       default y
+
+config 440SP
+       bool
+       depends on LUAN
+       default y
+
+config 440
+       bool
+       depends on 440GP || 440SP || 440EP
+       default y
+
+config 440A
+       bool
+       depends on 440GX
+       default y
+
+config IBM440EP_ERR42
+       bool
+       depends on 440EP
+       default y
+
+# All 405-based cores up until the 405GPR and 405EP have this errata.
+config IBM405_ERR77
+       bool
+       depends on 40x && !403GCX && !405GPR && !405EP
+       default y
+
+# All 40x-based cores, up until the 405GPR and 405EP have this errata.
+config IBM405_ERR51
+       bool
+       depends on 40x && !405GPR && !405EP
+       default y
+
+config BOOKE
+       bool
+       depends on 44x
+       default y
+
+config IBM_OCP
+       bool
+       depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
+       default y
+
+config XILINX_OCP
+       bool
+       depends on XILINX_ML300
+       default y
+
+config IBM_EMAC4
+       bool
+       depends on 440GX || 440SP
+       default y
+
+config BIOS_FIXUP
+       bool
+       depends on BUBINGA || EP405 || SYCAMORE || WALNUT
+       default y
+
+# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
+config 403GCX
+       bool
+       depends on OAK
+       default y
+
+config 405EP
+       bool
+       depends on BUBINGA
+       default y
+
+config 405GP
+       bool
+       depends on CPCI405 || EP405 || WALNUT
+       default y
+
+config 405GPR
+       bool
+       depends on SYCAMORE
+       default y
+
+config VIRTEX_II_PRO
+       bool
+       depends on XILINX_ML300
+       default y
+
+config STB03xxx
+       bool
+       depends on REDWOOD_5 || REDWOOD_6
+       default y
+
+config EMBEDDEDBOOT
+       bool
+       depends on EP405 || XILINX_ML300
+       default y
+
+config IBM_OPENBIOS
+       bool
+       depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
+       default y
+
+config PPC4xx_DMA
+       bool "PPC4xx DMA controller support"
+       depends on 4xx
+
+config PPC4xx_EDMA
+       bool
+       depends on !STB03xxx && PPC4xx_DMA
+       default y
+
+config PPC_GEN550
+       bool
+       depends on 4xx
+       default y
+
+choice
+       prompt "TTYS0 device and default console"
+       depends on 40x
+       default UART0_TTYS0
+
+config UART0_TTYS0
+       bool "UART0"
+
+config UART0_TTYS1
+       bool "UART1"
+
+endchoice
+
+config SERIAL_SICC
+       bool "SICC Serial port support"
+       depends on STB03xxx
+
+config UART1_DFLT_CONSOLE
+       bool
+       depends on SERIAL_SICC && UART0_TTYS1
+       default y
+
+config SERIAL_SICC_CONSOLE
+       bool
+       depends on SERIAL_SICC && UART0_TTYS1
+       default y
+endmenu
+
+
+menu "IBM 40x options"
+       depends on 40x
+
+config SERIAL_SICC
+       bool "SICC Serial port"
+       depends on STB03xxx
+
+config UART1_DFLT_CONSOLE
+       bool
+       depends on SERIAL_SICC && UART0_TTYS1
+       default y
+
+config SERIAL_SICC_CONSOLE
+       bool
+       depends on SERIAL_SICC && UART0_TTYS1
+       default y
+
+endmenu
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
new file mode 100644 (file)
index 0000000..79ff6b1
--- /dev/null
@@ -0,0 +1 @@
+# empty makefile so make clean works
\ No newline at end of file
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
new file mode 100644 (file)
index 0000000..c5bc282
--- /dev/null
@@ -0,0 +1,86 @@
+config 85xx
+       bool
+       depends on E500
+       default y
+
+config PPC_INDIRECT_PCI_BE
+       bool
+       depends on 85xx
+       default y
+
+menu "Freescale 85xx options"
+       depends on E500
+
+choice
+       prompt "Machine Type"
+       depends on 85xx
+       default MPC8540_ADS
+
+config MPC8540_ADS
+       bool "Freescale MPC8540 ADS"
+       help
+         This option enables support for the MPC 8540 ADS evaluation board.
+
+config MPC8548_CDS
+       bool "Freescale MPC8548 CDS"
+       help
+         This option enables support for the MPC8548 CDS evaluation board.
+
+config MPC8555_CDS
+       bool "Freescale MPC8555 CDS"
+       help
+         This option enables support for the MPC8555 CDS evaluation board.
+
+config MPC8560_ADS
+       bool "Freescale MPC8560 ADS"
+       help
+         This option enables support for the MPC 8560 ADS evaluation board.
+
+config SBC8560
+       bool "WindRiver PowerQUICC III SBC8560"
+       help
+         This option enables support for the WindRiver PowerQUICC III 
+         SBC8560 board.
+
+config STX_GP3
+       bool "Silicon Turnkey Express GP3"
+       help
+         This option enables support for the Silicon Turnkey Express GP3
+         board.
+
+endchoice
+
+# It's often necessary to know the specific 85xx processor type.
+# Fortunately, it is implied (so far) from the board type, so we
+# don't need to ask more redundant questions.
+config MPC8540
+       bool
+       depends on MPC8540_ADS
+       default y
+
+config MPC8548
+       bool
+       depends on MPC8548_CDS
+       default y
+
+config MPC8555
+       bool
+       depends on MPC8555_CDS
+       default y
+
+config MPC8560
+       bool
+       depends on SBC8560 || MPC8560_ADS || STX_GP3
+       default y
+
+config 85xx_PCI2
+       bool "Support for 2nd PCI host controller"
+       depends on MPC8555_CDS
+       default y
+
+config PPC_GEN550
+       bool
+       depends on MPC8540 || SBC8560 || MPC8555
+       default y
+
+endmenu
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
new file mode 100644 (file)
index 0000000..6407197
--- /dev/null
@@ -0,0 +1 @@
+# empty makefile so make clean works
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
new file mode 100644 (file)
index 0000000..c8c0ba3
--- /dev/null
@@ -0,0 +1,352 @@
+config FADS
+       bool
+
+choice
+       prompt "8xx Machine Type"
+       depends on 8xx
+       default RPXLITE
+
+config RPXLITE
+       bool "RPX-Lite"
+       ---help---
+         Single-board computers based around the PowerPC MPC8xx chips and
+         intended for embedded applications.  The following types are
+         supported:
+
+         RPX-Lite:
+         Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823.
+
+         RPX-Classic:
+         Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on
+         the MPC 860
+
+         BSE-IP:
+         Bright Star Engineering ip-Engine.
+
+         TQM823L:
+         TQM850L:
+         TQM855L:
+         TQM860L:
+         MPC8xx based family of mini modules, half credit card size,
+         up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports,
+         2 x CAN bus interface, ...
+         Manufacturer: TQ Components, www.tq-group.de
+         Date of Release: October (?) 1999
+         End of Life: not yet :-)
+         URL:
+         - module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>
+         - starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf>
+         - images: <http://www.denx.de/embedded-ppc-en.html>
+
+         FPS850L:
+         FingerPrint Sensor System (based on TQM850L)
+         Manufacturer: IKENDI AG, <http://www.ikendi.com/>
+         Date of Release: November 1999
+         End of life: end 2000 ?
+         URL: see TQM850L
+
+         IVMS8:
+         MPC860 based board used in the "Integrated Voice Mail System",
+         Small Version (8 voice channels)
+         Manufacturer: Speech Design, <http://www.speech-design.de/>
+         Date of Release: December 2000 (?)
+         End of life: -
+         URL: <http://www.speech-design.de/>
+
+         IVML24:
+         MPC860 based board used in the "Integrated Voice Mail System",
+         Large Version (24 voice channels)
+         Manufacturer: Speech Design, <http://www.speech-design.de/>
+         Date of Release: March 2001  (?)
+         End of life: -
+         URL: <http://www.speech-design.de/>
+
+         HERMES:
+         Hermes-Pro ISDN/LAN router with integrated 8 x hub
+         Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik
+         <http://www.multidata.de/>
+         Date of Release: 2000 (?)
+         End of life: -
+         URL: <http://www.multidata.de/english/products/hpro.htm>
+
+         IP860:
+         VMEBus IP (Industry Pack) carrier board with MPC860
+         Manufacturer: MicroSys GmbH, <http://www.microsys.de/>
+         Date of Release: ?
+         End of life: -
+         URL: <http://www.microsys.de/html/ip860.html>
+
+         PCU_E:
+         PCU = Peripheral Controller Unit, Extended
+         Manufacturer: Siemens AG, ICN (Information and Communication Networks)
+               <http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html>
+         Date of Release: April 2001
+         End of life: August 2001
+         URL: n. a.
+
+config RPXCLASSIC
+       bool "RPX-Classic"
+       help
+         The RPX-Classic is a single-board computer based on the Motorola
+         MPC860.  It features 16MB of DRAM and a variable amount of flash,
+         I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two
+         LEDs.  Variants with Ethernet ports exist.  Say Y here to support it
+         directly.
+
+config BSEIP
+       bool "BSE-IP"
+       help
+         Say Y here to support the Bright Star Engineering ipEngine SBC.
+         This is a credit-card-sized device featuring a MPC823 processor,
+         26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video
+         controller, and two RS232 ports.
+
+config MPC8XXFADS
+       bool "FADS"
+       select FADS
+
+config MPC86XADS
+       bool "MPC86XADS"
+       help
+         MPC86x Application Development System by Freescale Semiconductor.
+         The MPC86xADS is meant to serve as a platform for s/w and h/w
+         development around the MPC86X processor families.
+       select FADS
+
+config MPC885ADS
+       bool "MPC885ADS"
+       help
+         Freescale Semiconductor MPC885 Application Development System (ADS).
+         Also known as DUET.
+         The MPC885ADS is meant to serve as a platform for s/w and h/w
+         development around the MPC885 processor family.
+
+config TQM823L
+       bool "TQM823L"
+       help
+         Say Y here to support the TQM823L, one of an MPC8xx-based family of
+         mini SBCs (half credit-card size) from TQ Components first released
+         in late 1999.  Technical references are at
+         <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+         <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+         <http://www.denx.de/embedded-ppc-en.html>.
+
+config TQM850L
+       bool "TQM850L"
+       help
+         Say Y here to support the TQM850L, one of an MPC8xx-based family of
+         mini SBCs (half credit-card size) from TQ Components first released
+         in late 1999.  Technical references are at
+         <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+         <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+         <http://www.denx.de/embedded-ppc-en.html>.
+
+config TQM855L
+       bool "TQM855L"
+       help
+         Say Y here to support the TQM855L, one of an MPC8xx-based family of
+         mini SBCs (half credit-card size) from TQ Components first released
+         in late 1999.  Technical references are at
+         <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+         <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+         <http://www.denx.de/embedded-ppc-en.html>.
+
+config TQM860L
+       bool "TQM860L"
+       help
+         Say Y here to support the TQM860L, one of an MPC8xx-based family of
+         mini SBCs (half credit-card size) from TQ Components first released
+         in late 1999.  Technical references are at
+         <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+         <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+         <http://www.denx.de/embedded-ppc-en.html>.
+
+config FPS850L
+       bool "FPS850L"
+
+config IVMS8
+       bool "IVMS8"
+       help
+         Say Y here to support the Integrated Voice-Mail Small 8-channel SBC
+         from Speech Design, released March 2001.  The manufacturer's website
+         is at <http://www.speech-design.de/>.
+
+config IVML24
+       bool "IVML24"
+       help
+         Say Y here to support the Integrated Voice-Mail Large 24-channel SBC
+         from Speech Design, released March 2001.  The manufacturer's website
+         is at <http://www.speech-design.de/>.
+
+config HERMES_PRO
+       bool "HERMES"
+
+config IP860
+       bool "IP860"
+
+config LWMON
+       bool "LWMON"
+
+config PCU_E
+       bool "PCU_E"
+
+config CCM
+       bool "CCM"
+
+config LANTEC
+       bool "LANTEC"
+
+config MBX
+       bool "MBX"
+       help
+         MBX is a line of Motorola single-board computers based around the
+         MPC821 and MPC860 processors, and intended for embedded-controller
+         applications.  Say Y here to support these boards directly.
+
+config WINCEPT
+       bool "WinCept"
+       help
+         The Wincept 100/110 is a Motorola single-board computer based on the
+         MPC821 PowerPC, introduced in 1998 and designed to be used in
+         thin-client machines.  Say Y to support it directly.
+
+endchoice
+
+#
+# MPC8xx Communication options
+#
+
+menu "MPC8xx CPM Options"
+       depends on 8xx
+
+config SCC_ENET
+       bool "CPM SCC Ethernet"
+       depends on NET_ETHERNET
+       help
+         Enable Ethernet support via the Motorola MPC8xx serial
+         communications controller.
+
+choice
+       prompt "SCC used for Ethernet"
+       depends on SCC_ENET
+       default SCC1_ENET
+
+config SCC1_ENET
+       bool "SCC1"
+       help
+         Use MPC8xx serial communications controller 1 to drive Ethernet
+         (default).
+
+config SCC2_ENET
+       bool "SCC2"
+       help
+         Use MPC8xx serial communications controller 2 to drive Ethernet.
+
+config SCC3_ENET
+       bool "SCC3"
+       help
+         Use MPC8xx serial communications controller 3 to drive Ethernet.
+
+endchoice
+
+config FEC_ENET
+       bool "860T FEC Ethernet"
+       depends on NET_ETHERNET
+       help
+         Enable Ethernet support via the Fast Ethernet Controller (FEC) on
+         the Motorola MPC860T.
+
+config USE_MDIO
+       bool "Use MDIO for PHY configuration"
+       depends on FEC_ENET
+       help
+         On some boards the hardware configuration of the ethernet PHY can be
+         used without any software interaction over the MDIO interface, so
+         all MII code can be omitted. Say N here if unsure or if you don't
+         need link status reports.
+
+config  FEC_AM79C874
+       bool "Support AMD79C874 PHY"
+       depends on USE_MDIO
+
+config FEC_LXT970
+       bool "Support LXT970 PHY"
+       depends on USE_MDIO
+
+config FEC_LXT971
+       bool "Support LXT971 PHY"
+       depends on USE_MDIO
+       
+config FEC_QS6612
+       bool "Support QS6612 PHY"
+       depends on USE_MDIO
+       
+config ENET_BIG_BUFFERS
+       bool "Use Big CPM Ethernet Buffers"
+       depends on SCC_ENET || FEC_ENET
+       help
+         Allocate large buffers for MPC8xx Ethernet. Increases throughput
+         and decreases the likelihood of dropped packets, but costs memory.
+
+config HTDMSOUND
+       bool "Embedded Planet HIOX Audio"
+       depends on SOUND=y
+
+# This doesn't really belong here, but it is convenient to ask
+# 8xx specific questions.
+comment "Generic MPC8xx Options"
+
+config 8xx_COPYBACK
+       bool "Copy-Back Data Cache (else Writethrough)"
+       help
+         Saying Y here will cause the cache on an MPC8xx processor to be used
+         in Copy-Back mode.  If you say N here, it is used in Writethrough
+         mode.
+
+         If in doubt, say Y here.
+
+config 8xx_CPU6
+       bool "CPU6 Silicon Errata (860 Pre Rev. C)"
+       help
+         MPC860 CPUs prior to Rev. C have some bugs in the silicon, which
+         require workarounds for Linux (and most other OSes) to work.  If you
+         get a BUG() very early in boot, this might fix the problem.  For
+         more details read the document entitled "MPC860 Family Device Errata
+         Reference" on Motorola's website.  This option also incurs a
+         performance hit.
+
+         If in doubt, say N here.
+
+choice
+       prompt "Microcode patch selection"
+       default NO_UCODE_PATCH
+       help
+         Help not implemented yet, coming soon.
+
+config NO_UCODE_PATCH
+       bool "None"
+
+config USB_SOF_UCODE_PATCH
+       bool "USB SOF patch"
+       help
+         Help not implemented yet, coming soon.
+
+config I2C_SPI_UCODE_PATCH
+       bool "I2C/SPI relocation patch"
+       help
+         Help not implemented yet, coming soon.
+
+config I2C_SPI_SMC1_UCODE_PATCH
+       bool "I2C/SPI/SMC1 relocation patch"
+       help
+         Help not implemented yet, coming soon.
+
+endchoice
+
+config UCODE_PATCH
+       bool
+       default y
+       depends on !NO_UCODE_PATCH
+
+endmenu
+
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
new file mode 100644 (file)
index 0000000..509622d
--- /dev/null
@@ -0,0 +1,11 @@
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-$(CONFIG_PPC_PMAC)         += powermac/
+else
+ifeq ($(CONFIG_PPC64),y)
+obj-$(CONFIG_PPC_PMAC)         += powermac/
+endif
+endif
+obj-$(CONFIG_4xx)              += 4xx/
+obj-$(CONFIG_85xx)             += 85xx/
+obj-$(CONFIG_PPC_PSERIES)      += pseries/
+obj-$(CONFIG_PPC_ISERIES)      += iseries/
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig
new file mode 100644 (file)
index 0000000..6bde3bf
--- /dev/null
@@ -0,0 +1,130 @@
+
+config AMIGA
+       bool
+       depends on APUS
+       default y
+       help
+         This option enables support for the Amiga series of computers.
+
+config ZORRO
+       bool
+       depends on APUS
+       default y
+       help
+         This enables support for the Zorro bus in the Amiga. If you have
+         expansion cards in your Amiga that conform to the Amiga
+         AutoConfig(tm) specification, say Y, otherwise N. Note that even
+         expansion cards that do not fit in the Zorro slots but fit in e.g.
+         the CPU slot may fall in this category, so you have to say Y to let
+         Linux use these.
+
+config ABSTRACT_CONSOLE
+       bool
+       depends on APUS
+       default y
+
+config APUS_FAST_EXCEPT
+       bool
+       depends on APUS
+       default y
+
+config AMIGA_PCMCIA
+       bool "Amiga 1200/600 PCMCIA support"
+       depends on APUS && EXPERIMENTAL
+       help
+         Include support in the kernel for pcmcia on Amiga 1200 and Amiga
+         600. If you intend to use pcmcia cards say Y; otherwise say N.
+
+config AMIGA_BUILTIN_SERIAL
+       tristate "Amiga builtin serial support"
+       depends on APUS
+       help
+         If you want to use your Amiga's built-in serial port in Linux,
+         answer Y.
+
+         To compile this driver as a module, choose M here.
+
+config GVPIOEXT
+       tristate "GVP IO-Extender support"
+       depends on APUS
+       help
+         If you want to use a GVP IO-Extender serial card in Linux, say Y.
+         Otherwise, say N.
+
+config GVPIOEXT_LP
+       tristate "GVP IO-Extender parallel printer support"
+       depends on GVPIOEXT
+       help
+         Say Y to enable driving a printer from the parallel port on your
+         GVP IO-Extender card, N otherwise.
+
+config GVPIOEXT_PLIP
+       tristate "GVP IO-Extender PLIP support"
+       depends on GVPIOEXT
+       help
+         Say Y to enable doing IP over the parallel port on your GVP
+         IO-Extender card, N otherwise.
+
+config MULTIFACE_III_TTY
+       tristate "Multiface Card III serial support"
+       depends on APUS
+       help
+         If you want to use a Multiface III card's serial port in Linux,
+         answer Y.
+
+         To compile this driver as a module, choose M here.
+
+config A2232
+       tristate "Commodore A2232 serial support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL && APUS
+       ---help---
+         This option supports the 2232 7-port serial card shipped with the
+         Amiga 2000 and other Zorro-bus machines, dating from 1989.  At
+         a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
+         each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The
+         ports were connected with 8 pin DIN connectors on the card bracket,
+         for which 8 pin to DB25 adapters were supplied. The card also had
+         jumpers internally to toggle various pinning configurations.
+
+         This driver can be built as a module; but then "generic_serial"
+         will also be built as a module. This has to be loaded before
+         "ser_a2232". If you want to do this, answer M here.
+
+config WHIPPET_SERIAL
+       tristate "Hisoft Whippet PCMCIA serial support"
+       depends on AMIGA_PCMCIA
+       help
+         HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
+         is no listing for the Whippet in their Amiga section.
+
+config APNE
+       tristate "PCMCIA NE2000 support"
+       depends on AMIGA_PCMCIA
+       help
+         If you have a PCMCIA NE2000 compatible adapter, say Y.  Otherwise,
+         say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called apne.
+
+config SERIAL_CONSOLE
+       bool "Support for serial port console"
+       depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
+
+config HEARTBEAT
+       bool "Use power LED as a heartbeat"
+       depends on APUS
+       help
+         Use the power-on LED on your machine as a load meter.  The exact
+         behavior is platform-dependent, but normally the flash frequency is
+         a hyperbolic function of the 5-minute load average.
+
+config PROC_HARDWARE
+       bool "/proc/hardware support"
+       depends on APUS
+
+source "drivers/zorro/Kconfig"
+
+config PCI_PERMEDIA
+       bool "PCI for Permedia2"
+       depends on !4xx && !8xx && APUS
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
new file mode 100644 (file)
index 0000000..2d755b7
--- /dev/null
@@ -0,0 +1,305 @@
+choice
+       prompt "Machine Type"
+       depends on EMBEDDED6xx
+
+config KATANA
+       bool "Artesyn-Katana"
+       help
+         Select KATANA if configuring an Artesyn KATANA 750i or 3750
+         cPCI board.
+
+config WILLOW
+       bool "Cogent-Willow"
+
+config CPCI690
+       bool "Force-CPCI690"
+       help
+         Select CPCI690 if configuring a Force CPCI690 cPCI board.
+
+config POWERPMC250
+       bool "Force-PowerPMC250"
+
+config CHESTNUT
+       bool "IBM 750FX Eval board or 750GX Eval board"
+       help
+         Select CHESTNUT if configuring an IBM 750FX Eval Board or a
+         IBM 750GX Eval board.
+
+config SPRUCE
+       bool "IBM-Spruce"
+
+config HDPU
+       bool "Sky-HDPU"
+       help
+         Select HDPU if configuring a Sky Computers Compute Blade.
+
+config HDPU_FEATURES
+       depends on HDPU
+       tristate "HDPU-Features"
+       help
+         Select to enable HDPU enhanced features.
+
+config EV64260
+       bool "Marvell-EV64260BP"
+       help
+         Select EV64260 if configuring a Marvell (formerly Galileo)
+         EV64260BP Evaluation platform.
+
+config LOPEC
+       bool "Motorola-LoPEC"
+
+config MVME5100
+       bool "Motorola-MVME5100"
+
+config PPLUS
+       bool "Motorola-PowerPlus"
+
+config PRPMC750
+       bool "Motorola-PrPMC750"
+
+config PRPMC800
+       bool "Motorola-PrPMC800"
+
+config SANDPOINT
+       bool "Motorola-Sandpoint"
+       help
+         Select SANDPOINT if configuring for a Motorola Sandpoint X3
+         (any flavor).
+
+config RADSTONE_PPC7D
+       bool "Radstone Technology PPC7D board"
+
+config PAL4
+       bool "SBS-Palomar4"
+
+config GEMINI
+       bool "Synergy-Gemini"
+       depends on BROKEN
+       help
+         Select Gemini if configuring for a Synergy Microsystems' Gemini
+         series Single Board Computer.  More information is available at:
+         <http://www.synergymicro.com/PressRel/97_10_15.html>.
+
+config EST8260
+       bool "EST8260"
+       ---help---
+         The EST8260 is a single-board computer manufactured by Wind River
+         Systems, Inc. (formerly Embedded Support Tools Corp.) and based on
+         the MPC8260.  Wind River Systems has a website at
+         <http://www.windriver.com/>, but the EST8260 cannot be found on it
+         and has probably been discontinued or rebadged.
+
+config SBC82xx
+       bool "SBC82xx"
+       ---help---
+         SBC PowerQUICC II, single-board computer with MPC82xx CPU
+         Manufacturer: Wind River Systems, Inc.
+         Date of Release: May 2003
+         End of Life: -
+         URL: <http://www.windriver.com/>
+
+config SBS8260
+       bool "SBS8260"
+
+config RPX8260
+       bool "RPXSUPER"
+
+config TQM8260
+       bool "TQM8260"
+       ---help---
+         MPC8260 based module, little larger than credit card,
+         up to 128 MB global + 64 MB local RAM, 32 MB Flash,
+         32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet,
+         2 x serial ports, ...
+         Manufacturer: TQ Components, www.tq-group.de
+         Date of Release: June 2001
+         End of Life: not yet :-)
+         URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf>
+
+config ADS8272
+       bool "ADS8272"
+
+config PQ2FADS
+       bool "Freescale-PQ2FADS"
+       help
+         Select PQ2FADS if you wish to configure for a Freescale
+         PQ2FADS board (-VR or -ZU).
+
+config LITE5200
+       bool "Freescale LITE5200 / (IceCube)"
+       select PPC_MPC52xx
+       help
+         Support for the LITE5200 dev board for the MPC5200 from Freescale.
+         This is for the LITE5200 version 2.0 board. Don't know if it changes
+         much but it's only been tested on this board version. I think this
+         board is also known as IceCube.
+
+config MPC834x_SYS
+       bool "Freescale MPC834x SYS"
+       help
+         This option enables support for the MPC 834x SYS evaluation board.
+
+         Be aware that PCI buses can only function when the SYS board is
+         plugged into the PIB (Platform IO Board) from Freescale, which
+         provides 3 PCI slots.  The PIB's PCI initialization is the
+         bootloader's responsibility.
+
+config EV64360
+       bool "Marvell-EV64360BP"
+       help
+         Select EV64360 if configuring a Marvell EV64360BP Evaluation
+         platform.
+endchoice
+
+config PQ2ADS
+       bool
+       depends on ADS8272
+       default y
+
+config TQM8xxL
+       bool
+       depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
+       default y
+
+config PPC_MPC52xx
+       bool
+
+config 8260
+       bool "CPM2 Support" if WILLOW
+       depends on 6xx
+       default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS
+       help
+         The MPC8260 is a typical embedded CPU made by Motorola.  Selecting
+         this option means that you wish to build a kernel for a machine with
+         an 8260 class CPU.
+
+config 8272
+       bool
+       depends on 6xx
+       default y if ADS8272
+       select 8260
+       help
+         The MPC8272 CPM has a different internal dpram setup than other CPM2
+         devices.
+
+config 83xx
+       bool
+       default y if MPC834x_SYS
+
+config MPC834x
+       bool
+       default y if MPC834x_SYS
+
+config CPM2
+       bool
+       depends on 8260 || MPC8560 || MPC8555
+       default y
+       help
+         The CPM2 (Communications Processor Module) is a coprocessor on
+         embedded CPUs made by Motorola.  Selecting this option means that
+         you wish to build a kernel for a machine with a CPM2 coprocessor
+         on it (826x, 827x, 8560).
+
+config PPC_GEN550
+       bool
+       depends on SANDPOINT || SPRUCE || PPLUS || \
+               PRPMC750 || PRPMC800 || LOPEC || \
+               (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
+               83xx
+       default y
+
+config FORCE
+       bool
+       depends on 6xx && POWERPMC250
+       default y
+
+config GT64260
+       bool
+       depends on EV64260 || CPCI690
+       default y
+
+config MV64360         # Really MV64360 & MV64460
+       bool
+       depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360
+       default y
+
+config MV64X60
+       bool
+       depends on (GT64260 || MV64360)
+       default y
+
+menu "Set bridge options"
+       depends on MV64X60
+
+config NOT_COHERENT_CACHE
+       bool "Turn off Cache Coherency"
+       default n
+       help
+         Some 64x60 bridges lock up when trying to enforce cache coherency.
+         When this option is selected, cache coherency will be turned off.
+         Note that this can cause other problems (e.g., stale data being
+         speculatively loaded via a cached mapping).  Use at your own risk.
+
+config MV64X60_BASE
+       hex "Set bridge base used by firmware"
+       default "0xf1000000"
+       help
+         A firmware can leave the base address of the bridge's registers at
+         a non-standard location.  If so, set this value to reflect the
+         address of that non-standard location.
+
+config MV64X60_NEW_BASE
+       hex "Set bridge base used by kernel"
+       default "0xf1000000"
+       help
+         If the current base address of the bridge's registers is not where
+         you want it, set this value to the address that you want it moved to.
+
+endmenu
+
+config NONMONARCH_SUPPORT
+       bool "Enable Non-Monarch Support"
+       depends on PRPMC800
+
+config HARRIER
+       bool
+       depends on PRPMC800
+       default y
+
+config EPIC_SERIAL_MODE
+       bool
+       depends on 6xx && (LOPEC || SANDPOINT)
+       default y
+
+config MPC10X_BRIDGE
+       bool
+       depends on POWERPMC250 || LOPEC || SANDPOINT
+       default y
+
+config MPC10X_OPENPIC
+       bool
+       depends on POWERPMC250 || LOPEC || SANDPOINT
+       default y
+
+config MPC10X_STORE_GATHERING
+       bool "Enable MPC10x store gathering"
+       depends on MPC10X_BRIDGE
+
+config SANDPOINT_ENABLE_UART1
+       bool "Enable DUART mode on Sandpoint"
+       depends on SANDPOINT
+       help
+         If this option is enabled then the MPC824x processor will run
+         in DUART mode instead of UART mode.
+
+config HARRIER_STORE_GATHERING
+       bool "Enable Harrier store gathering"
+       depends on HARRIER
+
+config MVME5100_IPMC761_PRESENT
+       bool "MVME5100 configured with an IPMC761"
+       depends on MVME5100
+
+config SPRUCE_BAUD_33M
+       bool "Spruce baud clock support"
+       depends on SPRUCE
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
new file mode 100644 (file)
index 0000000..3d957a3
--- /dev/null
@@ -0,0 +1,31 @@
+
+menu "iSeries device drivers"
+       depends on PPC_ISERIES
+
+config VIOCONS
+       tristate "iSeries Virtual Console Support"
+
+config VIODASD
+       tristate "iSeries Virtual I/O disk support"
+       help
+         If you are running on an iSeries system and you want to use
+         virtual disks created and managed by OS/400, say Y.
+
+config VIOCD
+       tristate "iSeries Virtual I/O CD support"
+       help
+         If you are running Linux on an IBM iSeries system and you want to
+         read a CD drive owned by OS/400, say Y here.
+
+config VIOTAPE
+       tristate "iSeries Virtual Tape Support"
+       help
+         If you are running Linux on an iSeries system and you want Linux
+         to read and/or write a tape drive owned by OS/400, say Y here.
+
+endmenu
+
+config VIOPATH
+       bool
+       depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
+       default y
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
new file mode 100644 (file)
index 0000000..127b465
--- /dev/null
@@ -0,0 +1,9 @@
+EXTRA_CFLAGS   += -mno-minimal-toc
+
+obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
+       hvcall.o proc.o htab.o iommu.o misc.o
+obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
+obj-$(CONFIG_IBMVIO) += vio.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_VIOPATH) += viopath.o
+obj-$(CONFIG_MODULES) += ksyms.o
similarity index 96%
rename from include/asm-ppc64/iSeries/HvCallHpt.h
rename to arch/powerpc/platforms/iseries/call_hpt.h
index 43a1969..321f3bb 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvCallHpt.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _HVCALLHPT_H
-#define _HVCALLHPT_H
+#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
+#define _PLATFORMS_ISERIES_CALL_HPT_H
 
 /*
  * This file contains the "hypervisor call" interface which is used to
@@ -99,4 +98,4 @@ static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
        HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
 }
 
-#endif /* _HVCALLHPT_H */
+#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
similarity index 56%
rename from include/asm-ppc64/iSeries/HvCallPci.h
rename to arch/powerpc/platforms/iseries/call_pci.h
index c8d675c..a86e065 100644 (file)
@@ -22,8 +22,8 @@
  *   Created, Jan 9, 2001
  */
 
-#ifndef _HVCALLPCI_H
-#define _HVCALLPCI_H
+#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
+#define _PLATFORMS_ISERIES_CALL_PCI_H
 
 #include <asm/iSeries/HvCallSc.h>
 #include <asm/iSeries/HvTypes.h>
@@ -126,25 +126,6 @@ enum HvCallPci_VpdType {
 #define HvCallPciUnmaskInterrupts      HvCallPci + 49
 #define HvCallPciGetBusUnitInfo                HvCallPci + 50
 
-static inline u64 HvCallPci_configLoad8(u16 busNumber, u8 subBusNumber,
-               u8 deviceId, u32 offset, u8 *value)
-{
-       struct HvCallPci_DsaAddr dsa;
-       struct HvCallPci_LoadReturn retVal;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumber;
-       dsa.subBusNumber = subBusNumber;
-       dsa.deviceId = deviceId;
-
-       HvCall3Ret16(HvCallPciConfigLoad8, &retVal, *(u64 *)&dsa, offset, 0);
-
-       *value = retVal.value;
-
-       return retVal.rc;
-}
-
 static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
                u8 deviceId, u32 offset, u16 *value)
 {
@@ -164,25 +145,6 @@ static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
        return retVal.rc;
 }
 
-static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
-               u8 deviceId, u32 offset, u32 *value)
-{
-       struct HvCallPci_DsaAddr dsa;
-       struct HvCallPci_LoadReturn retVal;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumber;
-       dsa.subBusNumber = subBusNumber;
-       dsa.deviceId = deviceId;
-
-       HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
-
-       *value = retVal.value;
-
-       return retVal.rc;
-}
-
 static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
                u8 deviceId, u32 offset, u8 value)
 {
@@ -197,186 +159,6 @@ static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
        return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
 }
 
-static inline u64 HvCallPci_configStore16(u16 busNumber, u8 subBusNumber,
-               u8 deviceId, u32 offset, u16 value)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumber;
-       dsa.subBusNumber = subBusNumber;
-       dsa.deviceId = deviceId;
-
-       return HvCall4(HvCallPciConfigStore16, *(u64 *)&dsa, offset, value, 0);
-}
-
-static inline u64 HvCallPci_configStore32(u16 busNumber, u8 subBusNumber,
-               u8 deviceId, u32 offset, u32 value)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumber;
-       dsa.subBusNumber = subBusNumber;
-       dsa.deviceId = deviceId;
-
-       return HvCall4(HvCallPciConfigStore32, *(u64 *)&dsa, offset, value, 0);
-}
-
-static inline u64 HvCallPci_barLoad8(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u8 *valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-       struct HvCallPci_LoadReturn retVal;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       HvCall3Ret16(HvCallPciBarLoad8, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-       *valueParm = retVal.value;
-
-       return retVal.rc;
-}
-
-static inline u64 HvCallPci_barLoad16(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u16 *valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-       struct HvCallPci_LoadReturn retVal;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       HvCall3Ret16(HvCallPciBarLoad16, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-       *valueParm = retVal.value;
-
-       return retVal.rc;
-}
-
-static inline u64 HvCallPci_barLoad32(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u32 *valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-       struct HvCallPci_LoadReturn retVal;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       HvCall3Ret16(HvCallPciBarLoad32, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-       *valueParm = retVal.value;
-
-       return retVal.rc;
-}
-
-static inline u64 HvCallPci_barLoad64(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u64 *valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-       struct HvCallPci_LoadReturn retVal;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       HvCall3Ret16(HvCallPciBarLoad64, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-       *valueParm = retVal.value;
-
-       return retVal.rc;
-}
-
-static inline u64 HvCallPci_barStore8(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u8 valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       return HvCall4(HvCallPciBarStore8, *(u64 *)&dsa, offsetParm,
-                       valueParm, 0);
-}
-
-static inline u64 HvCallPci_barStore16(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u16 valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       return HvCall4(HvCallPciBarStore16, *(u64 *)&dsa, offsetParm,
-                       valueParm, 0);
-}
-
-static inline u64 HvCallPci_barStore32(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u32 valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       return HvCall4(HvCallPciBarStore32, *(u64 *)&dsa, offsetParm,
-                       valueParm, 0);
-}
-
-static inline u64 HvCallPci_barStore64(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-               u64 valueParm)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-       dsa.barNumber = barNumberParm;
-
-       return HvCall4(HvCallPciBarStore64, *(u64 *)&dsa, offsetParm,
-                       valueParm, 0);
-}
-
 static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
                u8 deviceIdParm)
 {
@@ -437,20 +219,6 @@ static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
        return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
 }
 
-static inline u64 HvCallPci_setSlotReset(u16 busNumberParm, u8 subBusParm,
-               u8 deviceIdParm, u64 onNotOff)
-{
-       struct HvCallPci_DsaAddr dsa;
-
-       *((u64*)&dsa) = 0;
-
-       dsa.busNumber = busNumberParm;
-       dsa.subBusNumber = subBusParm;
-       dsa.deviceId = deviceIdParm;
-
-       return HvCall2(HvCallPciSetSlotReset, *(u64*)&dsa, onNotOff);
-}
-
 static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
                u8 deviceNumberParm, u64 parms, u32 sizeofParms)
 {
@@ -519,15 +287,4 @@ static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
                return xRc & 0xFFFF;
 }
 
-static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm,
-               u16 sizeParm)
-{
-       u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
-                       sizeParm, HvCallPci_BusAdapterVpd);
-       if (xRc == -1)
-               return -1;
-       else
-               return xRc & 0xFFFF;
-}
-
-#endif /* _HVCALLPCI_H */
+#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
similarity index 93%
rename from include/asm-ppc64/iSeries/HvCallSm.h
rename to arch/powerpc/platforms/iseries/call_sm.h
index 8a3dbb0..ef22316 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvCallSm.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _HVCALLSM_H
-#define _HVCALLSM_H
+#ifndef _ISERIES_CALL_SM_H
+#define _ISERIES_CALL_SM_H
 
 /*
  * This file contains the "hypervisor call" interface which is used to
@@ -35,4 +34,4 @@ static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
        return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
 }
 
-#endif /* _HVCALLSM_H */
+#endif /* _ISERIES_CALL_SM_H */
similarity index 87%
rename from arch/ppc64/kernel/iSeries_htab.c
rename to arch/powerpc/platforms/iseries/htab.c
index 073b766..b3c6c33 100644 (file)
@@ -1,10 +1,10 @@
 /*
  * iSeries hashtable management.
- *     Derived from pSeries_htab.c
+ *     Derived from pSeries_htab.c
  *
  * SMP scalability work:
  *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
-#include <asm/iSeries/HvCallHpt.h>
 #include <asm/abs_addr.h>
 #include <linux/spinlock.h>
 
-static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
+#include "call_hpt.h"
+
+static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
+       { [0 ... 63] = SPIN_LOCK_UNLOCKED};
 
 /*
  * Very primitive algorithm for picking up a lock
@@ -84,6 +86,25 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
        return (secondary << 3) | (slot & 7);
 }
 
+long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
+               unsigned long va, unsigned long prpn, unsigned long vflags,
+               unsigned long rflags)
+{
+       long slot;
+       hpte_t lhpte;
+
+       slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
+
+       if (lhpte.v & HPTE_V_VALID) {
+               /* Bolt the existing HPTE */
+               HvCallHpt_setSwBits(slot, 0x10, 0);
+               HvCallHpt_setPp(slot, PP_RWXX);
+               return 0;
+       }
+
+       return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
+}
+
 static unsigned long iSeries_hpte_getword0(unsigned long slot)
 {
        hpte_t hpte;
@@ -107,7 +128,7 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
                hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
 
                if (! (hpte_v & HPTE_V_BOLTED)) {
-                       HvCallHpt_invalidateSetSwBitsGet(hpte_group + 
+                       HvCallHpt_invalidateSetSwBitsGet(hpte_group +
                                                         slot_offset, 0, 0);
                        iSeries_hunlock(hpte_group);
                        return i;
@@ -124,9 +145,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
 
 /*
  * The HyperVisor expects the "flags" argument in this form:
- *     bits  0..59 : reserved
- *     bit      60 : N
- *     bits 61..63 : PP2,PP1,PP0
+ *     bits  0..59 : reserved
+ *     bit      60 : N
+ *     bits 61..63 : PP2,PP1,PP0
  */
 static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                  unsigned long va, int large, int local)
@@ -152,7 +173,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 }
 
 /*
- * Functions used to find the PTE for a particular virtual address. 
+ * Functions used to find the PTE for a particular virtual address.
  * Only used during boot when bolting pages.
  *
  * Input : vpn      : virtual page number
@@ -170,7 +191,7 @@ static long iSeries_hpte_find(unsigned long vpn)
         * 0x00000000xxxxxxxx : Entry found in primary group, slot x
         * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
         */
-       slot = HvCallHpt_findValid(&hpte, vpn); 
+       slot = HvCallHpt_findValid(&hpte, vpn);
        if (hpte.v & HPTE_V_VALID) {
                if (slot < 0) {
                        slot &= 0x7fffffffffffffff;
@@ -197,7 +218,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
        vsid = get_kernel_vsid(ea);
        va = (vsid << 28) | (ea & 0x0fffffff);
        vpn = va >> PAGE_SHIFT;
-       slot = iSeries_hpte_find(vpn); 
+       slot = iSeries_hpte_find(vpn);
        if (slot == -1)
                panic("updateboltedpp: Could not find page to bolt\n");
        HvCallHpt_setPp(slot, newpp);
@@ -215,7 +236,7 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
        iSeries_hlock(slot);
 
        hpte_v = iSeries_hpte_getword0(slot);
-       
+
        if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
                HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
 
@@ -230,7 +251,7 @@ void hpte_init_iSeries(void)
        ppc_md.hpte_updatepp    = iSeries_hpte_updatepp;
        ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
        ppc_md.hpte_insert      = iSeries_hpte_insert;
-       ppc_md.hpte_remove      = iSeries_hpte_remove;
+       ppc_md.hpte_remove      = iSeries_hpte_remove;
 
        htab_finish_init();
 }
similarity index 95%
rename from arch/ppc64/kernel/hvCall.S
rename to arch/powerpc/platforms/iseries/hvcall.S
index 4c699ea..07ae6ad 100644 (file)
@@ -1,7 +1,4 @@
 /*
- * arch/ppc64/kernel/hvCall.S
- *
- *
  * This file contains the code to perform calls to the
  * iSeries LPAR hypervisor
  *
 
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>                /* XXX for STACK_FRAME_OVERHEAD */
 
        .text
 
-/* 
+/*
  * Hypervisor call
- * 
+ *
  * Invoke the iSeries hypervisor via the System Call instruction
  * Parameters are passed to this routine in registers r3 - r10
- * 
+ *
  * r3 contains the HV function to be called
  * r4-r10 contain the operands to the hypervisor function
  *
@@ -41,11 +39,11 @@ _GLOBAL(HvCall7)
        mfcr    r0
        std     r0,-8(r1)
        stdu    r1,-(STACK_FRAME_OVERHEAD+16)(r1)
-       
+
        /* r0 = 0xffffffffffffffff indicates a hypervisor call */
-       
+
        li      r0,-1
-       
+
        /* Invoke the hypervisor */
 
        sc
@@ -55,7 +53,7 @@ _GLOBAL(HvCall7)
        mtcrf   0xff,r0
 
        /*  return to caller, return value in r3 */
-       
+
        blr
 
 _GLOBAL(HvCall0Ret16)
@@ -92,7 +90,5 @@ _GLOBAL(HvCall7Ret16)
        ld      r0,-8(r1)
        mtcrf   0xff,r0
        ld      r31,-16(r1)
-       
-       blr
-
 
+       blr
similarity index 98%
rename from arch/ppc64/kernel/HvCall.c
rename to arch/powerpc/platforms/iseries/hvlog.c
index b772e65..f61e2e9 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvCall.c
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  * 
  * This program is free software; you can redistribute it and/or modify
similarity index 98%
rename from arch/ppc64/kernel/HvLpConfig.c
rename to arch/powerpc/platforms/iseries/hvlpconfig.c
index cb1d647..dc28621 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvLpConfig.c
  * Copyright (C) 2001  Kyle A. Lucke, IBM Corporation
  * 
  * This program is free software; you can redistribute it and/or modify
similarity index 82%
rename from arch/ppc64/kernel/iSeries_iommu.c
rename to arch/powerpc/platforms/iseries/iommu.c
index f8ff1bb..1db26d8 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * arch/ppc64/kernel/iSeries_iommu.c
- *
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  *
  * Rewrite, cleanup:
 #include <linux/list.h>
 
 #include <asm/iommu.h>
+#include <asm/tce.h>
 #include <asm/machdep.h>
+#include <asm/abs_addr.h>
+#include <asm/pci-bridge.h>
 #include <asm/iSeries/HvCallXm.h>
-#include <asm/iSeries/iSeries_pci.h>
 
 extern struct list_head iSeries_Global_Device_List;
 
@@ -90,15 +90,16 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
  */
 static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
 {
-       struct iSeries_Device_Node *dp;
-
-       list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) {
-               if ((dp->iommu_table != NULL) &&
-                   (dp->iommu_table->it_type == TCE_PCI) &&
-                   (dp->iommu_table->it_offset == tbl->it_offset) &&
-                   (dp->iommu_table->it_index == tbl->it_index) &&
-                   (dp->iommu_table->it_size == tbl->it_size))
-                       return dp->iommu_table;
+       struct pci_dn *pdn;
+
+       list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
+               struct iommu_table *it = pdn->iommu_table;
+               if ((it != NULL) &&
+                   (it->it_type == TCE_PCI) &&
+                   (it->it_offset == tbl->it_offset) &&
+                   (it->it_index == tbl->it_index) &&
+                   (it->it_size == tbl->it_size))
+                       return it;
        }
        return NULL;
 }
@@ -112,7 +113,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
  * 2. TCE table per Bus.
  * 3. TCE Table per IOA.
  */
-static void iommu_table_getparms(struct iSeries_Device_Node* dn,
+static void iommu_table_getparms(struct pci_dn *pdn,
                                 struct iommu_table* tbl)
 {
        struct iommu_table_cb *parms;
@@ -123,11 +124,11 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
 
        memset(parms, 0, sizeof(*parms));
 
-       parms->itc_busno = ISERIES_BUS(dn);
-       parms->itc_slotno = dn->LogicalSlot;
+       parms->itc_busno = pdn->busno;
+       parms->itc_slotno = pdn->LogicalSlot;
        parms->itc_virtbus = 0;
 
-       HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms));
+       HvCallXm_getTceTableParms(iseries_hv_addr(parms));
 
        if (parms->itc_size == 0)
                panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
@@ -144,18 +145,19 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
 }
 
 
-void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn)
+void iommu_devnode_init_iSeries(struct device_node *dn)
 {
        struct iommu_table *tbl;
+       struct pci_dn *pdn = PCI_DN(dn);
 
        tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
 
-       iommu_table_getparms(dn, tbl);
+       iommu_table_getparms(pdn, tbl);
 
        /* Look for existing tce table */
-       dn->iommu_table = iommu_table_find(tbl);
-       if (dn->iommu_table == NULL)
-               dn->iommu_table = iommu_init_table(tbl);
+       pdn->iommu_table = iommu_table_find(tbl);
+       if (pdn->iommu_table == NULL)
+               pdn->iommu_table = iommu_init_table(tbl);
        else
                kfree(tbl);
 }
similarity index 96%
rename from include/asm-ppc64/iSeries/ItIplParmsReal.h
rename to arch/powerpc/platforms/iseries/ipl_parms.h
index ae3417d..77c135d 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItIplParmsReal.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _ITIPLPARMSREAL_H
-#define _ITIPLPARMSREAL_H
+#ifndef _ISERIES_IPL_PARMS_H
+#define _ISERIES_IPL_PARMS_H
 
 /*
  *     This struct maps the IPL Parameters DMA'd from the SP.
@@ -68,4 +67,4 @@ struct ItIplParmsReal {
 
 extern struct ItIplParmsReal   xItIplParmsReal;
 
-#endif /* _ITIPLPARMSREAL_H */
+#endif /* _ISERIES_IPL_PARMS_H */
similarity index 98%
rename from arch/ppc64/kernel/iSeries_irq.c
rename to arch/powerpc/platforms/iseries/irq.c
index 77376c1..937ac99 100644 (file)
 #include <asm/ppcdebug.h>
 #include <asm/iSeries/HvTypes.h>
 #include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvCallPci.h>
 #include <asm/iSeries/HvCallXm.h>
-#include <asm/iSeries/iSeries_irq.h>
+
+#include "irq.h"
+#include "call_pci.h"
 
 /* This maps virtual irq numbers to real irqs */
 unsigned int virt_irq_to_real_map[NR_IRQS];
@@ -351,3 +352,15 @@ int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
        irq_desc[virtirq].handler = &iSeries_IRQ_handler;
        return virtirq;
 }
+
+int virt_irq_create_mapping(unsigned int real_irq)
+{
+       BUG(); /* Don't call this on iSeries, yet */
+
+       return 0;
+}
+
+void virt_irq_init(void)
+{
+       return;
+}
similarity index 64%
rename from include/asm-ppc64/iSeries/iSeries_irq.h
rename to arch/powerpc/platforms/iseries/irq.h
index 6c9767a..5f643f1 100644 (file)
@@ -1,8 +1,8 @@
-#ifndef        __ISERIES_IRQ_H__
-#define        __ISERIES_IRQ_H__
+#ifndef        _ISERIES_IRQ_H
+#define        _ISERIES_IRQ_H
 
 extern void iSeries_init_IRQ(void);
 extern int  iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
 extern void iSeries_activate_IRQs(void);
 
-#endif /* __ISERIES_IRQ_H__ */
+#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
new file mode 100644 (file)
index 0000000..f271b35
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * (C) 2001-2005 PPC 64 Team, IBM Corp
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+
+#include <asm/hw_irq.h>
+#include <asm/iSeries/HvCallSc.h>
+
+EXPORT_SYMBOL(HvCall0);
+EXPORT_SYMBOL(HvCall1);
+EXPORT_SYMBOL(HvCall2);
+EXPORT_SYMBOL(HvCall3);
+EXPORT_SYMBOL(HvCall4);
+EXPORT_SYMBOL(HvCall5);
+EXPORT_SYMBOL(HvCall6);
+EXPORT_SYMBOL(HvCall7);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(local_get_flags);
+EXPORT_SYMBOL(local_irq_disable);
+EXPORT_SYMBOL(local_irq_restore);
+#endif
similarity index 95%
rename from arch/ppc64/kernel/LparData.c
rename to arch/powerpc/platforms/iseries/lpardata.c
index 0a9c23c..ed2ffee 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright 2001 Mike Corrigan, IBM Corp
  *
  * This program is free software; you can redistribute it and/or
 #include <asm/lppaca.h>
 #include <asm/iSeries/ItLpRegSave.h>
 #include <asm/paca.h>
-#include <asm/iSeries/HvReleaseData.h>
 #include <asm/iSeries/LparMap.h>
-#include <asm/iSeries/ItVpdAreas.h>
-#include <asm/iSeries/ItIplParmsReal.h>
 #include <asm/iSeries/ItExtVpdPanel.h>
 #include <asm/iSeries/ItLpQueue.h>
-#include <asm/iSeries/IoHriProcessorVpd.h>
-#include <asm/iSeries/ItSpCommArea.h>
 
+#include "vpd_areas.h"
+#include "spcomm_area.h"
+#include "ipl_parms.h"
+#include "processor_vpd.h"
+#include "release_data.h"
 
-/* The HvReleaseData is the root of the information shared between 
- * the hypervisor and Linux.  
+/* The HvReleaseData is the root of the information shared between
+ * the hypervisor and Linux.
  */
 struct HvReleaseData hvReleaseData = {
        .xDesc = 0xc8a5d9c4,    /* "HvRD" ebcdic */
@@ -79,7 +79,7 @@ extern void trap_0e_iSeries(void);
 extern void performance_monitor_iSeries(void);
 extern void data_access_slb_iSeries(void);
 extern void instruction_access_slb_iSeries(void);
-       
+
 struct ItLpNaca itLpNaca = {
        .xDesc = 0xd397d581,            /* "LpNa" ebcdic */
        .xSize = 0x0400,                /* size of ItLpNaca */
@@ -106,7 +106,7 @@ struct ItLpNaca itLpNaca = {
        .xLoadAreaChunks = 0,           /* chunks for load area */
        .xPaseSysCallCRMask = 0,        /* PASE mask */
        .xSlicSegmentTablePtr = 0,      /* seg table */
-       .xOldLpQueue = { 0 },           /* Old LP Queue */
+       .xOldLpQueue = { 0 },           /* Old LP Queue */
        .xInterruptHdlr = {
                (u64)system_reset_iSeries,      /* 0x100 System Reset */
                (u64)machine_check_iSeries,     /* 0x200 Machine Check */
@@ -134,7 +134,7 @@ struct ItLpNaca itLpNaca = {
 EXPORT_SYMBOL(itLpNaca);
 
 /* May be filled in by the hypervisor so cannot end up in the BSS */
-struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data"))); 
+struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
 
 /* May be filled in by the hypervisor so cannot end up in the BSS */
 struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
@@ -151,7 +151,7 @@ struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
                .xPVR = 0x3600
        }
 };
-       
+
 /* Space for Main Store Vpd 27,200 bytes */
 /* May be filled in by the hypervisor so cannot end up in the BSS */
 u64    xMsVpd[3400] __attribute__((__section__(".data")));
@@ -197,7 +197,7 @@ struct ItVpdAreas itVpdAreas = {
                26992,                  /*       7 length of MS VPD */
                0,                      /*       8 */
                sizeof(struct ItLpNaca),/*       9 length of LP Naca */
-               0,                      /*      10 */
+               0,                      /*      10 */
                256,                    /*      11 length of Recovery Log Buf */
                sizeof(struct SpCommArea), /*   12 length of SP Comm Area */
                0,0,0,                  /* 13 - 15 */
@@ -207,7 +207,7 @@ struct ItVpdAreas itVpdAreas = {
                0,0                     /* 24 - 25 */
                },
        .xSlicVpdAdrs = {                       /* VPD addresses */
-               0,0,0,                  /*       0 -  2 */
+               0,0,0,                  /*       0 -  2 */
                &xItExtVpdPanel,        /*       3 Extended VPD */
                &paca[0],               /*       4 first Paca */
                0,                      /*       5 */
similarity index 78%
rename from arch/ppc64/kernel/ItLpQueue.c
rename to arch/powerpc/platforms/iseries/lpevents.c
index 4231861..f8b4155 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItLpQueue.c
  * Copyright (C) 2001 Mike Corrigan  IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
+#include <linux/module.h>
+
 #include <asm/system.h>
 #include <asm/paca.h>
 #include <asm/iSeries/ItLpQueue.h>
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>
+#include <asm/iSeries/ItLpNaca.h>
 
 /*
  * The LpQueue is used to pass event data from the hypervisor to
@@ -43,7 +45,8 @@ static char *event_types[HvLpEvent_Type_NumTypes] = {
 };
 
 /* Array of LpEvent handler functions */
-extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
+static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
+static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
 
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
@@ -199,6 +202,70 @@ void setup_hvlpevent_queue(void)
        hvlpevent_queue.xIndex = 0;
 }
 
+/* Register a handler for an LpEvent type */
+int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
+{
+       if (eventType < HvLpEvent_Type_NumTypes) {
+               lpEventHandler[eventType] = handler;
+               return 0;
+       }
+       return 1;
+}
+EXPORT_SYMBOL(HvLpEvent_registerHandler);
+
+int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
+{
+       might_sleep();
+
+       if (eventType < HvLpEvent_Type_NumTypes) {
+               if (!lpEventHandlerPaths[eventType]) {
+                       lpEventHandler[eventType] = NULL;
+                       /*
+                        * We now sleep until all other CPUs have scheduled.
+                        * This ensures that the deletion is seen by all
+                        * other CPUs, and that the deleted handler isn't
+                        * still running on another CPU when we return.
+                        */
+                       synchronize_rcu();
+                       return 0;
+               }
+       }
+       return 1;
+}
+EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
+
+/*
+ * lpIndex is the partition index of the target partition.
+ * needed only for VirtualIo, VirtualLan and SessionMgr.  Zero
+ * indicates to use our partition index - for the other types.
+ */
+int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
+{
+       if ((eventType < HvLpEvent_Type_NumTypes) &&
+                       lpEventHandler[eventType]) {
+               if (lpIndex == 0)
+                       lpIndex = itLpNaca.xLpIndex;
+               HvCallEvent_openLpEventPath(lpIndex, eventType);
+               ++lpEventHandlerPaths[eventType];
+               return 0;
+       }
+       return 1;
+}
+
+int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
+{
+       if ((eventType < HvLpEvent_Type_NumTypes) &&
+                       lpEventHandler[eventType] &&
+                       lpEventHandlerPaths[eventType]) {
+               if (lpIndex == 0)
+                       lpIndex = itLpNaca.xLpIndex;
+               HvCallEvent_closeLpEventPath(lpIndex, eventType);
+               --lpEventHandlerPaths[eventType];
+               return 0;
+       }
+       return 1;
+}
+
 static int proc_lpevents_show(struct seq_file *m, void *v)
 {
        int cpu, i;
similarity index 97%
rename from include/asm-ppc64/iSeries/IoHriMainStore.h
rename to arch/powerpc/platforms/iseries/main_store.h
index 45ed3ea..74f6889 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * IoHriMainStore.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -17,8 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
-#ifndef _IOHRIMAINSTORE_H
-#define _IOHRIMAINSTORE_H
+#ifndef _ISERIES_MAIN_STORE_H
+#define _ISERIES_MAIN_STORE_H
 
 /* Main Store Vpd for Condor,iStar,sStar */
 struct IoHriMainStoreSegment4 {
@@ -163,4 +162,4 @@ struct IoHriMainStoreSegment5 {
 
 extern u64     xMsVpd[];
 
-#endif /* _IOHRIMAINSTORE_H */
+#endif /* _ISERIES_MAIN_STORE_H */
similarity index 93%
rename from arch/ppc64/kernel/mf.c
rename to arch/powerpc/platforms/iseries/mf.c
index ef4a338..e5de31a 100644 (file)
@@ -1,29 +1,28 @@
 /*
-  * mf.c
-  * Copyright (C) 2001 Troy D. Armstrong  IBM Corporation
-  * Copyright (C) 2004-2005 Stephen Rothwell  IBM Corporation
-  *
-  * This modules exists as an interface between a Linux secondary partition
-  * running on an iSeries and the primary partition's Virtual Service
-  * Processor (VSP) object.  The VSP has final authority over powering on/off
-  * all partitions in the iSeries.  It also provides miscellaneous low-level
-  * machine facility type operations.
-  *
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of the GNU General Public License as published by
-  * the Free Software Foundation; either version 2 of the License, or
-  * (at your option) any later version.
-  *
-  * This program is distributed in the hope that it will be useful,
-  * but WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  * GNU General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-  */
+ * Copyright (C) 2001 Troy D. Armstrong  IBM Corporation
+ * Copyright (C) 2004-2005 Stephen Rothwell  IBM Corporation
+ *
+ * This modules exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object.  The VSP has final authority over powering on/off
+ * all partitions in the iSeries.  It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
 
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/bcd.h>
+#include <linux/rtc.h>
 
 #include <asm/time.h>
 #include <asm/uaccess.h>
 #include <asm/paca.h>
+#include <asm/abs_addr.h>
 #include <asm/iSeries/vio.h>
 #include <asm/iSeries/mf.h>
 #include <asm/iSeries/HvLpConfig.h>
 #include <asm/iSeries/ItLpQueue.h>
 
+#include "setup.h"
+
+extern int piranha_simulator;
+
 /*
  * This is the structure layout for the Machine Facilites LPAR event
  * flows.
@@ -1061,10 +1066,10 @@ static void mf_getSrcHistory(char *buffer, int size)
        ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
        ev->event.data.vsp_cmd.result_code = 0xFF;
        ev->event.data.vsp_cmd.reserved = 0;
-       ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]);
-       ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]);
-       ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]);
-       ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]);
+       ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
+       ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
+       ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
+       ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
        mb();
        if (signal_event(ev) != 0)
                return;
@@ -1279,3 +1284,38 @@ static int __init mf_proc_init(void)
 __initcall(mf_proc_init);
 
 #endif /* CONFIG_PROC_FS */
+
+/*
+ * Get the RTC from the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
+{
+       if (piranha_simulator)
+               return;
+
+       mf_get_rtc(rtc_tm);
+       rtc_tm->tm_mon--;
+}
+
+/*
+ * Set the RTC in the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+int iSeries_set_rtc_time(struct rtc_time *tm)
+{
+       mf_set_rtc(tm);
+       return 0;
+}
+
+unsigned long iSeries_get_boot_time(void)
+{
+       struct rtc_time tm;
+
+       if (piranha_simulator)
+               return 0;
+
+       mf_get_boot_rtc(&tm);
+       return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
+                     tm.tm_hour, tm.tm_min, tm.tm_sec);
+}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
new file mode 100644 (file)
index 0000000..09f1452
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * This file contains miscellaneous low-level functions.
+ *    Copyright (C) 1995-2005 IBM Corp
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+
+       .text
+
+/* unsigned long local_save_flags(void) */
+_GLOBAL(local_get_flags)
+       lbz     r3,PACAPROCENABLED(r13)
+       blr
+
+/* unsigned long local_irq_disable(void) */
+_GLOBAL(local_irq_disable)
+       lbz     r3,PACAPROCENABLED(r13)
+       li      r4,0
+       stb     r4,PACAPROCENABLED(r13)
+       blr                     /* Done */
+
+/* void local_irq_restore(unsigned long flags) */
+_GLOBAL(local_irq_restore)
+       lbz     r5,PACAPROCENABLED(r13)
+        /* Check if things are setup the way we want _already_. */
+       cmpw    0,r3,r5
+       beqlr
+       /* are we enabling interrupts? */
+       cmpdi   0,r3,0
+       stb     r3,PACAPROCENABLED(r13)
+       beqlr
+       /* Check pending interrupts */
+       /*   A decrementer, IPI or PMC interrupt may have occurred
+        *   while we were in the hypervisor (which enables) */
+       ld      r4,PACALPPACA+LPPACAANYINT(r13)
+       cmpdi   r4,0
+       beqlr
+
+       /*
+        * Handle pending interrupts in interrupt context
+        */
+       li      r0,0x5555
+       sc
+       blr
similarity index 86%
rename from arch/ppc64/kernel/iSeries_pci.c
rename to arch/powerpc/platforms/iseries/pci.c
index fbc273c..959e59f 100644 (file)
@@ -1,28 +1,26 @@
 /*
- * iSeries_pci.c
- *
  * Copyright (C) 2001 Allan Trautman, IBM Corporation
  *
  * iSeries specific routines for PCI.
- * 
+ *
  * Based on code from pci.c and iSeries_pci.c 32bit
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 #include <linux/kernel.h>
-#include <linux/list.h> 
+#include <linux/list.h>
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppcdebug.h>
 #include <asm/iommu.h>
+#include <asm/abs_addr.h>
 
-#include <asm/iSeries/HvCallPci.h>
 #include <asm/iSeries/HvCallXm.h>
-#include <asm/iSeries/iSeries_irq.h>
-#include <asm/iSeries/iSeries_pci.h>
 #include <asm/iSeries/mf.h>
 
+#include <asm/ppc-pci.h>
+
+#include "irq.h"
 #include "pci.h"
+#include "call_pci.h"
 
 extern unsigned long io_page_mask;
 
 /*
- * Forward declares of prototypes. 
+ * Forward declares of prototypes.
  */
-static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn);
+static struct device_node *find_Device_Node(int bus, int devfn);
 static void scan_PHB_slots(struct pci_controller *Phb);
 static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
 static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
@@ -68,7 +68,7 @@ static long Pci_Cfg_Write_Count;
 #endif
 static long Pci_Error_Count;
 
-static int Pci_Retry_Max = 3;  /* Only retry 3 times  */       
+static int Pci_Retry_Max = 3;  /* Only retry 3 times  */
 static int Pci_Error_Flag = 1; /* Set Retry Error on. */
 
 static struct pci_ops iSeries_pci_ops;
@@ -87,7 +87,7 @@ static long current_iomm_table_entry;
 /*
  * Lookup Tables.
  */
-static struct iSeries_Device_Node **iomm_table;
+static struct device_node **iomm_table;
 static u8 *iobar_table;
 
 /*
@@ -179,7 +179,7 @@ static void allocate_device_bars(struct pci_dev *dev)
        for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
                bar_res = &dev->resource[bar_num];
                iomm_table_allocate_entry(dev, bar_num);
-       }
+       }
 }
 
 /*
@@ -201,29 +201,31 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
 /*
  * build_device_node(u16 Bus, int SubBus, u8 DevFn)
  */
-static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus,
+static struct device_node *build_device_node(HvBusNumber Bus,
                HvSubBusNumber SubBus, int AgentId, int Function)
 {
-       struct iSeries_Device_Node *node;
+       struct device_node *node;
+       struct pci_dn *pdn;
 
        PPCDBG(PPCDBG_BUSWALK,
                        "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
                        Bus, SubBus, AgentId, Function);
 
-       node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
+       node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
        if (node == NULL)
                return NULL;
-
-       memset(node, 0, sizeof(struct iSeries_Device_Node));
-       list_add_tail(&node->Device_List, &iSeries_Global_Device_List);
-#if 0
-       node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32);
-#endif
-       node->DsaAddr.DsaAddr = 0;
-       node->DsaAddr.Dsa.busNumber = Bus;
-       node->DsaAddr.Dsa.subBusNumber = SubBus;
-       node->DsaAddr.Dsa.deviceId = 0x10;
-       node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
+       memset(node, 0, sizeof(struct device_node));
+       pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+       if (pdn == NULL) {
+               kfree(node);
+               return NULL;
+       }
+       node->data = pdn;
+       pdn->node = node;
+       list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List);
+       pdn->busno = Bus;
+       pdn->bussubno = SubBus;
+       pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
        return node;
 }
 
@@ -278,28 +280,28 @@ unsigned long __init find_and_init_phbs(void)
 
 /*
  * iSeries_pcibios_init
- *  
+ *
  * Chance to initialize and structures or variable before PCI Bus walk.
  */
 void iSeries_pcibios_init(void)
 {
-       PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n"); 
+       PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
        iomm_table_initialize();
        find_and_init_phbs();
        io_page_mask = -1;
-       PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n"); 
+       PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
 }
 
 /*
- * iSeries_pci_final_fixup(void)  
+ * iSeries_pci_final_fixup(void)
  */
 void __init iSeries_pci_final_fixup(void)
 {
        struct pci_dev *pdev = NULL;
-       struct iSeries_Device_Node *node;
-       int DeviceCount = 0;
+       struct device_node *node;
+       int DeviceCount = 0;
 
-       PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); 
+       PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
 
        /* Fix up at the device node and pci_dev relationship */
        mf_display_src(0xC9000100);
@@ -313,7 +315,7 @@ void __init iSeries_pci_final_fixup(void)
                if (node != NULL) {
                        ++DeviceCount;
                        pdev->sysdata = (void *)node;
-                       node->PciDev = pdev;
+                       PCI_DN(node)->pcidev = pdev;
                        PPCDBG(PPCDBG_BUSWALK,
                                        "pdev 0x%p <==> DevNode 0x%p\n",
                                        pdev, node);
@@ -323,7 +325,7 @@ void __init iSeries_pci_final_fixup(void)
                } else
                        printk("PCI: Device Tree not found for 0x%016lX\n",
                                        (unsigned long)pdev);
-               pdev->irq = node->Irq;
+               pdev->irq = PCI_DN(node)->Irq;
        }
        iSeries_activate_IRQs();
        mf_display_src(0xC9000200);
@@ -332,24 +334,24 @@ void __init iSeries_pci_final_fixup(void)
 void pcibios_fixup_bus(struct pci_bus *PciBus)
 {
        PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
-                       PciBus->number); 
+                       PciBus->number);
 }
 
 void pcibios_fixup_resources(struct pci_dev *pdev)
 {
        PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
-}   
+}
 
 /*
- * Loop through each node function to find usable EADs bridges.  
+ * Loop through each node function to find usable EADs bridges.
  */
 static void scan_PHB_slots(struct pci_controller *Phb)
 {
        struct HvCallPci_DeviceInfo *DevInfo;
-       HvBusNumber bus = Phb->local_number;    /* System Bus */        
+       HvBusNumber bus = Phb->local_number;    /* System Bus */
        const HvSubBusNumber SubBus = 0;        /* EADs is always 0. */
        int HvRc = 0;
-       int IdSel;      
+       int IdSel;
        const int MaxAgents = 8;
 
        DevInfo = (struct HvCallPci_DeviceInfo*)
@@ -358,11 +360,11 @@ static void scan_PHB_slots(struct pci_controller *Phb)
                return;
 
        /*
-        * Probe for EADs Bridges      
+        * Probe for EADs Bridges
         */
        for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
-               HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
-                               ISERIES_HV_ADDR(DevInfo),
+               HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
+                               iseries_hv_addr(DevInfo),
                                sizeof(struct HvCallPci_DeviceInfo));
                if (HvRc == 0) {
                        if (DevInfo->deviceType == HvCallPci_NodeDevice)
@@ -393,19 +395,19 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
 
        /* Note: hvSubBus and irq is always be 0 at this level! */
        for (Function = 0; Function < 8; ++Function) {
-               AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
+               AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
                HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
-               if (HvRc == 0) {
+               if (HvRc == 0) {
                        printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
                               bus, IdSel, Function, AgentId);
-                       /*  Connect EADs: 0x18.00.12 = 0x00 */
+                       /*  Connect EADs: 0x18.00.12 = 0x00 */
                        PPCDBG(PPCDBG_BUSWALK,
                                        "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
                                        bus, SubBus, AgentId);
-                       HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
-                                       ISERIES_HV_ADDR(BridgeInfo),
+                       HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
+                                       iseries_hv_addr(BridgeInfo),
                                        sizeof(struct HvCallPci_BridgeInfo));
-                       if (HvRc == 0) {
+                       if (HvRc == 0) {
                                printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
                                        BridgeInfo->busUnitInfo.deviceType,
                                        BridgeInfo->subBusNumber,
@@ -428,7 +430,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
                                        printk("PCI: Invalid Bridge Configuration(0x%02X)",
                                                BridgeInfo->busUnitInfo.deviceType);
                        }
-               } else if (HvRc != 0x000B)
+               } else if (HvRc != 0x000B)
                        pci_Log_Error("EADs Connect",
                                        bus, SubBus, AgentId, HvRc);
        }
@@ -441,7 +443,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
 static int scan_bridge_slot(HvBusNumber Bus,
                struct HvCallPci_BridgeInfo *BridgeInfo)
 {
-       struct iSeries_Device_Node *node;
+       struct device_node *node;
        HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
        u16 VendorId = 0;
        int HvRc = 0;
@@ -451,16 +453,16 @@ static int scan_bridge_slot(HvBusNumber Bus,
        HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
 
        /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
-       Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
+       Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
        PPCDBG(PPCDBG_BUSWALK,
                "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
                Bus, 0, EADsIdSel, Irq);
 
        /*
-        * Connect all functions of any device found.  
+        * Connect all functions of any device found.
         */
-       for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
-               for (Function = 0; Function < 8; ++Function) {
+       for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
+               for (Function = 0; Function < 8; ++Function) {
                        HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
                        HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
                                        AgentId, Irq);
@@ -484,15 +486,15 @@ static int scan_bridge_slot(HvBusNumber Bus,
                               "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
                               Bus, SubBus, AgentId, VendorId, Irq);
                        HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
-                                                     PCI_INTERRUPT_LINE, Irq);  
+                                                     PCI_INTERRUPT_LINE, Irq);
                        if (HvRc != 0)
                                pci_Log_Error("PciCfgStore Irq Failed!",
                                              Bus, SubBus, AgentId, HvRc);
 
                        ++DeviceCount;
                        node = build_device_node(Bus, SubBus, EADsIdSel, Function);
-                       node->Irq = Irq;
-                       node->LogicalSlot = BridgeInfo->logicalSlotNumber;
+                       PCI_DN(node)->Irq = Irq;
+                       PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;
 
                } /* for (Function = 0; Function < 8; ++Function) */
        } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
@@ -542,16 +544,13 @@ EXPORT_SYMBOL(iSeries_memcpy_fromio);
 /*
  * Look down the chain to find the matching Device Device
  */
-static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
+static struct device_node *find_Device_Node(int bus, int devfn)
 {
-       struct list_head *pos;
+       struct pci_dn *pdn;
 
-       list_for_each(pos, &iSeries_Global_Device_List) {
-               struct iSeries_Device_Node *node =
-                       list_entry(pos, struct iSeries_Device_Node, Device_List);
-
-               if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
-                       return node;
+       list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
+               if ((bus == pdn->busno) && (devfn == pdn->devfn))
+                       return pdn->node;
        }
        return NULL;
 }
@@ -562,12 +561,12 @@ static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
  * Sanity Check Node PciDev to passed pci_dev
  * If none is found, returns a NULL which the client must handle.
  */
-static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev)
+static struct device_node *get_Device_Node(struct pci_dev *pdev)
 {
-       struct iSeries_Device_Node *node;
+       struct device_node *node;
 
        node = pdev->sysdata;
-       if (node == NULL || node->PciDev != pdev)
+       if (node == NULL || PCI_DN(node)->pcidev != pdev)
                node = find_Device_Node(pdev->bus->number, pdev->devfn);
        return node;
 }
@@ -595,7 +594,7 @@ static u64 hv_cfg_write_func[4] = {
 static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
                int offset, int size, u32 *val)
 {
-       struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
+       struct device_node *node = find_Device_Node(bus->number, devfn);
        u64 fn;
        struct HvCallPci_LoadReturn ret;
 
@@ -607,7 +606,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        }
 
        fn = hv_cfg_read_func[(size - 1) & 3];
-       HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0);
+       HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
 
        if (ret.rc != 0) {
                *val = ~0;
@@ -625,7 +624,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
 static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
                int offset, int size, u32 val)
 {
-       struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
+       struct device_node *node = find_Device_Node(bus->number, devfn);
        u64 fn;
        u64 ret;
 
@@ -635,7 +634,7 @@ static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
                return PCIBIOS_BAD_REGISTER_NUMBER;
 
        fn = hv_cfg_write_func[(size - 1) & 3];
-       ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0);
+       ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
 
        if (ret != 0)
                return PCIBIOS_DEVICE_NOT_FOUND;
@@ -657,14 +656,16 @@ static struct pci_ops iSeries_pci_ops = {
  * PCI: Device 23.90 ReadL Retry( 1)
  * PCI: Device 23.90 ReadL Retry Successful(1)
  */
-static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
+static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
                int *retry, u64 ret)
 {
        if (ret != 0)  {
+               struct pci_dn *pdn = PCI_DN(DevNode);
+
                ++Pci_Error_Count;
                (*retry)++;
                printk("PCI: %s: Device 0x%04X:%02X  I/O Error(%2d): 0x%04X\n",
-                               TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn,
+                               TextHdr, pdn->busno, pdn->devfn,
                                *retry, (int)ret);
                /*
                 * Bump the retry and check for retry count exceeded.
@@ -687,14 +688,14 @@ static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
  * Note: Make sure the passed variable end up on the stack to avoid
  * the exposure of being device global.
  */
-static inline struct iSeries_Device_Node *xlate_iomm_address(
+static inline struct device_node *xlate_iomm_address(
                const volatile void __iomem *IoAddress,
                u64 *dsaptr, u64 *BarOffsetPtr)
 {
        unsigned long OrigIoAddr;
        unsigned long BaseIoAddr;
        unsigned long TableIndex;
-       struct iSeries_Device_Node *DevNode;
+       struct device_node *DevNode;
 
        OrigIoAddr = (unsigned long __force)IoAddress;
        if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
@@ -705,7 +706,7 @@ static inline struct iSeries_Device_Node *xlate_iomm_address(
 
        if (DevNode != NULL) {
                int barnum = iobar_table[TableIndex];
-               *dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24);
+               *dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
                *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
        } else
                panic("PCI: Invalid PCI IoAddress detected!\n");
@@ -727,7 +728,7 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
        u64 dsa;
        int retry = 0;
        struct HvCallPci_LoadReturn ret;
-       struct iSeries_Device_Node *DevNode =
+       struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
        if (DevNode == NULL) {
@@ -757,7 +758,7 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
        u64 dsa;
        int retry = 0;
        struct HvCallPci_LoadReturn ret;
-       struct iSeries_Device_Node *DevNode =
+       struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
        if (DevNode == NULL) {
@@ -788,7 +789,7 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
        u64 dsa;
        int retry = 0;
        struct HvCallPci_LoadReturn ret;
-       struct iSeries_Device_Node *DevNode =
+       struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
        if (DevNode == NULL) {
@@ -826,7 +827,7 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
        u64 dsa;
        int retry = 0;
        u64 rc;
-       struct iSeries_Device_Node *DevNode =
+       struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
        if (DevNode == NULL) {
@@ -854,7 +855,7 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
        u64 dsa;
        int retry = 0;
        u64 rc;
-       struct iSeries_Device_Node *DevNode =
+       struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
        if (DevNode == NULL) {
@@ -882,7 +883,7 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
        u64 dsa;
        int retry = 0;
        u64 rc;
-       struct iSeries_Device_Node *DevNode =
+       struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
        if (DevNode == NULL) {
similarity index 55%
rename from include/asm-ppc64/iSeries/iSeries_pci.h
rename to arch/powerpc/platforms/iseries/pci.h
index 575f611..33a8489 100644 (file)
@@ -1,8 +1,8 @@
-#ifndef _ISERIES_64_PCI_H
-#define _ISERIES_64_PCI_H
+#ifndef _PLATFORMS_ISERIES_PCI_H
+#define _PLATFORMS_ISERIES_PCI_H
 
 /*
- * File iSeries_pci.h created by Allan Trautman on Tue Feb 20, 2001.
+ * Created by Allan Trautman on Tue Feb 20, 2001.
  *
  * Define some useful macros for the iSeries pci routines.
  * Copyright (C) 2001  Allan H Trautman, IBM Corporation
  * End Change Activity
  */
 
-#include <asm/iSeries/HvCallPci.h>
-#include <asm/abs_addr.h>
+#include <asm/pci-bridge.h>
 
 struct pci_dev;                                /* For Forward Reference */
-struct iSeries_Device_Node;
-
-/*
- * Gets iSeries Bus, SubBus, DevFn using iSeries_Device_Node structure
- */
-
-#define ISERIES_BUS(DevPtr)    DevPtr->DsaAddr.Dsa.busNumber
-#define ISERIES_SUBBUS(DevPtr) DevPtr->DsaAddr.Dsa.subBusNumber
-#define ISERIES_DEVICE(DevPtr) DevPtr->DsaAddr.Dsa.deviceId
-#define ISERIES_DSA(DevPtr)    DevPtr->DsaAddr.DsaAddr
-#define ISERIES_DEVNODE(PciDev)        ((struct iSeries_Device_Node *)PciDev->sysdata)
-
-#define EADsMaxAgents 7
 
 /*
  * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
@@ -62,27 +48,16 @@ struct iSeries_Device_Node;
 #define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)       ((subbus >> 2) & 0x7)
 
 /*
- * Converts Virtual Address to Real Address for Hypervisor calls
+ * Generate a Direct Select Address for the Hypervisor
  */
-#define ISERIES_HV_ADDR(virtaddr)      \
-       (0x8000000000000000 | virt_to_abs(virtaddr))
+static inline u64 iseries_ds_addr(struct device_node *node)
+{
+       struct pci_dn *pdn = PCI_DN(node);
 
-/*
- * iSeries Device Information
- */
-struct iSeries_Device_Node {
-       struct list_head Device_List;
-       struct pci_dev  *PciDev;
-       union HvDsaMap  DsaAddr;        /* Direct Select Address */
-                                       /* busNumber, subBusNumber, */
-                                       /* deviceId, barNumber */
-       int             DevFn;          /* Linux devfn */
-       int             Irq;            /* Assigned IRQ */
-       int             Flags;          /* Possible flags(disable/bist)*/
-       u8              LogicalSlot;    /* Hv Slot Index for Tces */
-       struct iommu_table *iommu_table;/* Device TCE Table */
-};
+       return ((u64)pdn->busno << 48) + ((u64)pdn->bussubno << 40)
+                       + ((u64)0x10 << 32);
+}
 
 extern void    iSeries_Device_Information(struct pci_dev*, int);
 
-#endif /* _ISERIES_64_PCI_H */
+#endif /* _PLATFORMS_ISERIES_PCI_H */
similarity index 91%
rename from arch/ppc64/kernel/iSeries_proc.c
rename to arch/powerpc/platforms/iseries/proc.c
index 0fe3116..6f1929c 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * iSeries_proc.c
  * Copyright (C) 2001  Kyle A. Lucke IBM Corporation
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
  *
@@ -27,8 +26,9 @@
 #include <asm/lppaca.h>
 #include <asm/iSeries/ItLpQueue.h>
 #include <asm/iSeries/HvCallXm.h>
-#include <asm/iSeries/IoHriMainStore.h>
-#include <asm/iSeries/IoHriProcessorVpd.h>
+
+#include "processor_vpd.h"
+#include "main_store.h"
 
 static int __init iseries_proc_create(void)
 {
@@ -68,12 +68,15 @@ static int proc_titantod_show(struct seq_file *m, void *v)
                unsigned long tb_ticks = (tb0 - startTb);
                unsigned long titan_jiffies = titan_usec / (1000000/HZ);
                unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
-               unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec;
+               unsigned long titan_jiff_rem_usec =
+                       titan_usec - titan_jiff_usec;
                unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
                unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
                unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
-               unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec;
-               unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec;
+               unsigned long tb_jiff_rem_usec =
+                       tb_jiff_rem_ticks / tb_ticks_per_usec;
+               unsigned long new_tb_ticks_per_jiffy =
+                       (tb_ticks * (1000000/HZ))/titan_usec;
 
                seq_printf(m, "  titan elapsed = %lu uSec\n", titan_usec);
                seq_printf(m, "  tb elapsed    = %lu ticks\n", tb_ticks);
@@ -1,5 +1,4 @@
 /*
- * IoHriProcessorVpd.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _IOHRIPROCESSORVPD_H
-#define _IOHRIPROCESSORVPD_H
+#ifndef _ISERIES_PROCESSOR_VPD_H
+#define _ISERIES_PROCESSOR_VPD_H
 
 #include <asm/types.h>
 
@@ -83,4 +82,4 @@ struct IoHriProcessorVpd {
 
 extern struct IoHriProcessorVpd        xIoHriProcessorVpd[];
 
-#endif /* _IOHRIPROCESSORVPD_H */
+#endif /* _ISERIES_PROCESSOR_VPD_H */
similarity index 95%
rename from include/asm-ppc64/iSeries/HvReleaseData.h
rename to arch/powerpc/platforms/iseries/release_data.h
index c8162e5..c68b9c3 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvReleaseData.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _HVRELEASEDATA_H
-#define _HVRELEASEDATA_H
+#ifndef _ISERIES_RELEASE_DATA_H
+#define _ISERIES_RELEASE_DATA_H
 
 /*
  * This control block contains the critical information about the
@@ -61,4 +60,4 @@ struct HvReleaseData {
 
 extern struct HvReleaseData    hvReleaseData;
 
-#endif /* _HVRELEASEDATA_H */
+#endif /* _ISERIES_RELEASE_DATA_H */
similarity index 74%
rename from arch/ppc64/kernel/iSeries_setup.c
rename to arch/powerpc/platforms/iseries/setup.c
index 3ffefbb..b279014 100644 (file)
@@ -2,8 +2,6 @@
  *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
  *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
  *
- *    Module name: iSeries_setup.c
- *
  *    Description:
  *      Architecture- / platform-specific boot-time initialization code for
  *      the IBM iSeries LPAR.  Adapted from original code by Grant Erickson and
 #include <asm/firmware.h>
 
 #include <asm/time.h>
-#include "iSeries_setup.h"
 #include <asm/naca.h>
 #include <asm/paca.h>
 #include <asm/cache.h>
 #include <asm/sections.h>
 #include <asm/abs_addr.h>
-#include <asm/iSeries/HvCallHpt.h>
 #include <asm/iSeries/HvLpConfig.h>
 #include <asm/iSeries/HvCallEvent.h>
-#include <asm/iSeries/HvCallSm.h>
 #include <asm/iSeries/HvCallXm.h>
 #include <asm/iSeries/ItLpQueue.h>
-#include <asm/iSeries/IoHriMainStore.h>
 #include <asm/iSeries/mf.h>
 #include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/iSeries_irq.h>
-#include <asm/iSeries/IoHriProcessorVpd.h>
-#include <asm/iSeries/ItVpdAreas.h>
 #include <asm/iSeries/LparMap.h>
 
+#include "setup.h"
+#include "irq.h"
+#include "vpd_areas.h"
+#include "processor_vpd.h"
+#include "main_store.h"
+#include "call_sm.h"
+#include "call_hpt.h"
+
 extern void hvlog(char *fmt, ...);
 
 #ifdef DEBUG
@@ -74,8 +73,8 @@ extern void hvlog(char *fmt, ...);
 extern void ppcdbg_initialize(void);
 
 static void build_iSeries_Memory_Map(void);
-static void setup_iSeries_cache_sizes(void);
-static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
+static void iseries_shared_idle(void);
+static void iseries_dedicated_idle(void);
 #ifdef CONFIG_PCI
 extern void iSeries_pci_final_fixup(void);
 #else
@@ -83,14 +82,6 @@ static void iSeries_pci_final_fixup(void) { }
 #endif
 
 /* Global Variables */
-static unsigned long procFreqHz;
-static unsigned long procFreqMhz;
-static unsigned long procFreqMhzHundreths;
-
-static unsigned long tbFreqHz;
-static unsigned long tbFreqMhz;
-static unsigned long tbFreqMhzHundreths;
-
 int piranha_simulator;
 
 extern int rd_size;            /* Defined in drivers/block/rd.c */
@@ -319,6 +310,8 @@ static void __init iSeries_init_early(void)
 
        ppcdbg_initialize();
 
+       ppc64_interrupt_controller = IC_ISERIES;
+
 #if defined(CONFIG_BLK_DEV_INITRD)
        /*
         * If the init RAM disk has been configured and there is
@@ -341,12 +334,6 @@ static void __init iSeries_init_early(void)
        iSeries_recal_titan = HvCallXm_loadTod();
 
        /*
-        * Cache sizes must be initialized before hpte_init_iSeries is called
-        * as the later need them for flush_icache_range()
-        */
-       setup_iSeries_cache_sizes();
-
-       /*
         * Initialize the hash table management pointers
         */
        hpte_init_iSeries();
@@ -356,12 +343,6 @@ static void __init iSeries_init_early(void)
         */
        iommu_init_early_iSeries();
 
-       /*
-        * Initialize the table which translate Linux physical addresses to
-        * AS/400 absolute addresses
-        */
-       build_iSeries_Memory_Map();
-
        iSeries_get_cmdline();
 
        /* Save unparsed command line copy for /proc/cmdline */
@@ -379,14 +360,6 @@ static void __init iSeries_init_early(void)
                }
        }
 
-       /* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
-       iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
-
-       lmb_init();
-       lmb_add(0, systemcfg->physicalMemorySize);
-       lmb_analyze();
-       lmb_reserve(0, __pa(klimit));
-
        /* Initialize machine-dependency vectors */
 #ifdef CONFIG_SMP
        smp_init_iSeries();
@@ -457,7 +430,6 @@ static void __init build_iSeries_Memory_Map(void)
        u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
        u32 nextPhysChunk;
        u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
-       u32 num_ptegs;
        u32 totalChunks,moreChunks;
        u32 currChunk, thisChunk, absChunk;
        u32 currDword;
@@ -520,10 +492,7 @@ static void __init build_iSeries_Memory_Map(void)
        printk("HPT absolute addr = %016lx, size = %dK\n",
                        chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
 
-       /* Fill in the hashed page table hash mask */
-       num_ptegs = hptSizePages *
-               (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
-       htab_hash_mask = num_ptegs - 1;
+       ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
 
        /*
         * The actual hashed page table is in the hypervisor,
@@ -592,144 +561,33 @@ static void __init build_iSeries_Memory_Map(void)
 }
 
 /*
- * Set up the variables that describe the cache line sizes
- * for this machine.
- */
-static void __init setup_iSeries_cache_sizes(void)
-{
-       unsigned int i, n;
-       unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
-
-       systemcfg->icache_size =
-       ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
-       systemcfg->icache_line_size =
-       ppc64_caches.iline_size =
-               xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
-       systemcfg->dcache_size =
-       ppc64_caches.dsize =
-               xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
-       systemcfg->dcache_line_size =
-       ppc64_caches.dline_size =
-               xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
-       ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
-       ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;
-
-       i = ppc64_caches.iline_size;
-       n = 0;
-       while ((i = (i / 2)))
-               ++n;
-       ppc64_caches.log_iline_size = n;
-
-       i = ppc64_caches.dline_size;
-       n = 0;
-       while ((i = (i / 2)))
-               ++n;
-       ppc64_caches.log_dline_size = n;
-
-       printk("D-cache line size = %d\n",
-                       (unsigned int)ppc64_caches.dline_size);
-       printk("I-cache line size = %d\n",
-                       (unsigned int)ppc64_caches.iline_size);
-}
-
-/*
- * Create a pte. Used during initialization only.
- */
-static void iSeries_make_pte(unsigned long va, unsigned long pa,
-                            int mode)
-{
-       hpte_t local_hpte, rhpte;
-       unsigned long hash, vpn;
-       long slot;
-
-       vpn = va >> PAGE_SHIFT;
-       hash = hpt_hash(vpn, 0);
-
-       local_hpte.r = pa | mode;
-       local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
-               | HPTE_V_BOLTED | HPTE_V_VALID;
-
-       slot = HvCallHpt_findValid(&rhpte, vpn);
-       if (slot < 0) {
-               /* Must find space in primary group */
-               panic("hash_page: hpte already exists\n");
-       }
-       HvCallHpt_addValidate(slot, 0, &local_hpte);
-}
-
-/*
- * Bolt the kernel addr space into the HPT
- */
-static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
-{
-       unsigned long pa;
-       unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
-       hpte_t hpte;
-
-       for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
-               unsigned long ea = (unsigned long)__va(pa);
-               unsigned long vsid = get_kernel_vsid(ea);
-               unsigned long va = (vsid << 28) | (pa & 0xfffffff);
-               unsigned long vpn = va >> PAGE_SHIFT;
-               unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
-
-               /* Make non-kernel text non-executable */
-               if (!in_kernel_text(ea))
-                       mode_rw |= HW_NO_EXEC;
-
-               if (hpte.v & HPTE_V_VALID) {
-                       /* HPTE exists, so just bolt it */
-                       HvCallHpt_setSwBits(slot, 0x10, 0);
-                       /* And make sure the pp bits are correct */
-                       HvCallHpt_setPp(slot, PP_RWXX);
-               } else
-                       /* No HPTE exists, so create a new bolted one */
-                       iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
-       }
-}
-
-/*
  * Document me.
  */
 static void __init iSeries_setup_arch(void)
 {
        unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
 
-       /* Add an eye catcher and the systemcfg layout version number */
-       strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
-       systemcfg->version.major = SYSTEMCFG_MAJOR;
-       systemcfg->version.minor = SYSTEMCFG_MINOR;
+       if (get_paca()->lppaca.shared_proc) {
+               ppc_md.idle_loop = iseries_shared_idle;
+               printk(KERN_INFO "Using shared processor idle loop\n");
+       } else {
+               ppc_md.idle_loop = iseries_dedicated_idle;
+               printk(KERN_INFO "Using dedicated idle loop\n");
+       }
 
        /* Setup the Lp Event Queue */
        setup_hvlpevent_queue();
 
-       /* Compute processor frequency */
-       procFreqHz = ((1UL << 34) * 1000000) /
-                       xIoHriProcessorVpd[procIx].xProcFreq;
-       procFreqMhz = procFreqHz / 1000000;
-       procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
-       ppc_proc_freq = procFreqHz;
-
-       /* Compute time base frequency */
-       tbFreqHz = ((1UL << 32) * 1000000) /
-               xIoHriProcessorVpd[procIx].xTimeBaseFreq;
-       tbFreqMhz = tbFreqHz / 1000000;
-       tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
-       ppc_tb_freq = tbFreqHz;
-
        printk("Max  logical processors = %d\n",
                        itVpdAreas.xSlicMaxLogicalProcs);
        printk("Max physical processors = %d\n",
                        itVpdAreas.xSlicMaxPhysicalProcs);
-       printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
-                       procFreqMhzHundreths);
-       printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
-                       tbFreqMhzHundreths);
+
        systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
        printk("Processor version = %x\n", systemcfg->processor);
 }
 
-static void iSeries_get_cpuinfo(struct seq_file *m)
+static void iSeries_show_cpuinfo(struct seq_file *m)
 {
        seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
 }
@@ -768,49 +626,6 @@ static void iSeries_halt(void)
        mf_power_off();
 }
 
-/*
- * void __init iSeries_calibrate_decr()
- *
- * Description:
- *   This routine retrieves the internal processor frequency from the VPD,
- *   and sets up the kernel timer decrementer based on that value.
- *
- */
-static void __init iSeries_calibrate_decr(void)
-{
-       unsigned long   cyclesPerUsec;
-       struct div_result divres;
-
-       /* Compute decrementer (and TB) frequency in cycles/sec */
-       cyclesPerUsec = ppc_tb_freq / 1000000;
-
-       /*
-        * Set the amount to refresh the decrementer by.  This
-        * is the number of decrementer ticks it takes for
-        * 1/HZ seconds.
-        */
-       tb_ticks_per_jiffy = ppc_tb_freq / HZ;
-
-#if 0
-       /* TEST CODE FOR ADJTIME */
-       tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
-       /* END OF TEST CODE */
-#endif
-
-       /*
-        * tb_ticks_per_sec = freq; would give better accuracy
-        * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
-        * that jiffies (and xtime) will match the time returned
-        * by do_gettimeofday.
-        */
-       tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
-       tb_ticks_per_usec = cyclesPerUsec;
-       tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
-       div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
-       tb_to_xs = divres.result_low;
-       setup_default_decr();
-}
-
 static void __init iSeries_progress(char * st, unsigned short code)
 {
        printk("Progress: [%04x] - %s\n", (unsigned)code, st);
@@ -878,7 +693,7 @@ static void yield_shared_processor(void)
        process_iSeries_events();
 }
 
-static int iseries_shared_idle(void)
+static void iseries_shared_idle(void)
 {
        while (1) {
                while (!need_resched() && !hvlpevent_is_pending()) {
@@ -900,11 +715,9 @@ static int iseries_shared_idle(void)
 
                schedule();
        }
-
-       return 0;
 }
 
-static int iseries_dedicated_idle(void)
+static void iseries_dedicated_idle(void)
 {
        long oldval;
 
@@ -934,44 +747,252 @@ static int iseries_dedicated_idle(void)
                ppc64_runlatch_on();
                schedule();
        }
-
-       return 0;
 }
 
 #ifndef CONFIG_PCI
 void __init iSeries_init_IRQ(void) { }
 #endif
 
-void __init iSeries_early_setup(void)
+static int __init iseries_probe(int platform)
 {
-       iSeries_fixup_klimit();
+       return PLATFORM_ISERIES_LPAR == platform;
+}
 
-       ppc_md.setup_arch = iSeries_setup_arch;
-       ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
-       ppc_md.init_IRQ = iSeries_init_IRQ;
-       ppc_md.get_irq = iSeries_get_irq;
-       ppc_md.init_early = iSeries_init_early,
+struct machdep_calls __initdata iseries_md = {
+       .setup_arch     = iSeries_setup_arch,
+       .show_cpuinfo   = iSeries_show_cpuinfo,
+       .init_IRQ       = iSeries_init_IRQ,
+       .get_irq        = iSeries_get_irq,
+       .init_early     = iSeries_init_early,
+       .pcibios_fixup  = iSeries_pci_final_fixup,
+       .restart        = iSeries_restart,
+       .power_off      = iSeries_power_off,
+       .halt           = iSeries_halt,
+       .get_boot_time  = iSeries_get_boot_time,
+       .set_rtc_time   = iSeries_set_rtc_time,
+       .get_rtc_time   = iSeries_get_rtc_time,
+       .calibrate_decr = generic_calibrate_decr,
+       .progress       = iSeries_progress,
+       .probe          = iseries_probe,
+       /* XXX Implement enable_pmcs for iSeries */
+};
 
-       ppc_md.pcibios_fixup  = iSeries_pci_final_fixup;
+struct blob {
+       unsigned char data[PAGE_SIZE];
+       unsigned long next;
+};
 
-       ppc_md.restart = iSeries_restart;
-       ppc_md.power_off = iSeries_power_off;
-       ppc_md.halt = iSeries_halt;
+struct iseries_flat_dt {
+       struct boot_param_header header;
+       u64 reserve_map[2];
+       struct blob dt;
+       struct blob strings;
+};
 
-       ppc_md.get_boot_time = iSeries_get_boot_time;
-       ppc_md.set_rtc_time = iSeries_set_rtc_time;
-       ppc_md.get_rtc_time = iSeries_get_rtc_time;
-       ppc_md.calibrate_decr = iSeries_calibrate_decr;
-       ppc_md.progress = iSeries_progress;
+struct iseries_flat_dt iseries_dt;
 
-       /* XXX Implement enable_pmcs for iSeries */
+void dt_init(struct iseries_flat_dt *dt)
+{
+       dt->header.off_mem_rsvmap =
+               offsetof(struct iseries_flat_dt, reserve_map);
+       dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
+       dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
+       dt->header.totalsize = sizeof(struct iseries_flat_dt);
+       dt->header.dt_strings_size = sizeof(struct blob);
 
-       if (get_paca()->lppaca.shared_proc) {
-               ppc_md.idle_loop = iseries_shared_idle;
-               printk(KERN_INFO "Using shared processor idle loop\n");
-       } else {
-               ppc_md.idle_loop = iseries_dedicated_idle;
-               printk(KERN_INFO "Using dedicated idle loop\n");
+       /* There is no notion of hardware cpu id on iSeries */
+       dt->header.boot_cpuid_phys = smp_processor_id();
+
+       dt->dt.next = (unsigned long)&dt->dt.data;
+       dt->strings.next = (unsigned long)&dt->strings.data;
+
+       dt->header.magic = OF_DT_HEADER;
+       dt->header.version = 0x10;
+       dt->header.last_comp_version = 0x10;
+
+       dt->reserve_map[0] = 0;
+       dt->reserve_map[1] = 0;
+}
+
+void dt_check_blob(struct blob *b)
+{
+       if (b->next >= (unsigned long)&b->next) {
+               DBG("Ran out of space in flat device tree blob!\n");
+               BUG();
+       }
+}
+
+void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
+{
+       *((u32*)dt->dt.next) = value;
+       dt->dt.next += sizeof(u32);
+
+       dt_check_blob(&dt->dt);
+}
+
+void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
+{
+       *((u64*)dt->dt.next) = value;
+       dt->dt.next += sizeof(u64);
+
+       dt_check_blob(&dt->dt);
+}
+
+unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
+{
+       unsigned long start = blob->next - (unsigned long)blob->data;
+
+       memcpy((char *)blob->next, data, len);
+       blob->next = _ALIGN(blob->next + len, 4);
+
+       dt_check_blob(blob);
+
+       return start;
+}
+
+void dt_start_node(struct iseries_flat_dt *dt, char *name)
+{
+       dt_push_u32(dt, OF_DT_BEGIN_NODE);
+       dt_push_bytes(&dt->dt, name, strlen(name) + 1);
+}
+
+#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
+
+void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
+{
+       unsigned long offset;
+
+       dt_push_u32(dt, OF_DT_PROP);
+
+       /* Length of the data */
+       dt_push_u32(dt, len);
+
+       /* Put the property name in the string blob. */
+       offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
+
+       /* The offset of the properties name in the string blob. */
+       dt_push_u32(dt, (u32)offset);
+
+       /* The actual data. */
+       dt_push_bytes(&dt->dt, data, len);
+}
+
+void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
+{
+       dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
+}
+
+void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
+{
+       dt_prop(dt, name, (char *)&data, sizeof(u32));
+}
+
+void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
+{
+       dt_prop(dt, name, (char *)&data, sizeof(u64));
+}
+
+void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
+{
+       dt_prop(dt, name, (char *)data, sizeof(u64) * n);
+}
+
+void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
+{
+       dt_prop(dt, name, NULL, 0);
+}
+
+void dt_cpus(struct iseries_flat_dt *dt)
+{
+       unsigned char buf[32];
+       unsigned char *p;
+       unsigned int i, index;
+       struct IoHriProcessorVpd *d;
+
+       /* yuck */
+       snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
+       p = strchr(buf, ' ');
+       if (!p) p = buf + strlen(buf);
+
+       dt_start_node(dt, "cpus");
+       dt_prop_u32(dt, "#address-cells", 1);
+       dt_prop_u32(dt, "#size-cells", 0);
+
+       for (i = 0; i < NR_CPUS; i++) {
+               if (paca[i].lppaca.dyn_proc_status >= 2)
+                       continue;
+
+               snprintf(p, 32 - (p - buf), "@%d", i);
+               dt_start_node(dt, buf);
+
+               dt_prop_str(dt, "device_type", "cpu");
+
+               index = paca[i].lppaca.dyn_hv_phys_proc_index;
+               d = &xIoHriProcessorVpd[index];
+
+               dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
+               dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
+
+               dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
+               dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
+
+               /* magic conversions to Hz copied from old code */
+               dt_prop_u32(dt, "clock-frequency",
+                       ((1UL << 34) * 1000000) / d->xProcFreq);
+               dt_prop_u32(dt, "timebase-frequency",
+                       ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
+
+               dt_prop_u32(dt, "reg", i);
+
+               dt_end_node(dt);
        }
+
+       dt_end_node(dt);
+}
+
+void build_flat_dt(struct iseries_flat_dt *dt)
+{
+       u64 tmp[2];
+
+       dt_init(dt);
+
+       dt_start_node(dt, "");
+
+       dt_prop_u32(dt, "#address-cells", 2);
+       dt_prop_u32(dt, "#size-cells", 2);
+
+       /* /memory */
+       dt_start_node(dt, "memory@0");
+       dt_prop_str(dt, "name", "memory");
+       dt_prop_str(dt, "device_type", "memory");
+       tmp[0] = 0;
+       tmp[1] = systemcfg->physicalMemorySize;
+       dt_prop_u64_list(dt, "reg", tmp, 2);
+       dt_end_node(dt);
+
+       /* /chosen */
+       dt_start_node(dt, "chosen");
+       dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
+       dt_end_node(dt);
+
+       dt_cpus(dt);
+
+       dt_end_node(dt);
+
+       dt_push_u32(dt, OF_DT_END);
 }
 
+void * __init iSeries_early_setup(void)
+{
+       iSeries_fixup_klimit();
+
+       /*
+        * Initialize the table which translate Linux physical addresses to
+        * AS/400 absolute addresses
+        */
+       build_iSeries_Memory_Map();
+
+       build_flat_dt(&iseries_dt);
+
+       return (void *) __pa(&iseries_dt);
+}
similarity index 90%
rename from arch/ppc64/kernel/iSeries_setup.h
rename to arch/powerpc/platforms/iseries/setup.h
index c6eb29a..5213044 100644 (file)
@@ -2,8 +2,6 @@
  *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
  *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
  *
- *    Module name: as400_setup.h
- *
  *    Description:
  *      Architecture- / platform-specific boot-time initialization code for
  *      the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
@@ -19,7 +17,7 @@
 #ifndef        __ISERIES_SETUP_H__
 #define        __ISERIES_SETUP_H__
 
-extern void iSeries_get_boot_time(struct rtc_time *tm);
+extern unsigned long iSeries_get_boot_time(void);
 extern int iSeries_set_rtc_time(struct rtc_time *tm);
 extern void iSeries_get_rtc_time(struct rtc_time *tm);
 
similarity index 73%
rename from arch/ppc64/kernel/iSeries_smp.c
rename to arch/powerpc/platforms/iseries/smp.c
index f74386e..f720916 100644 (file)
 
 static unsigned long iSeries_smp_message[NR_CPUS];
 
-void iSeries_smp_message_recv( struct pt_regs * regs )
+void iSeries_smp_message_recv(struct pt_regs *regs)
 {
        int cpu = smp_processor_id();
        int msg;
 
-       if ( num_online_cpus() < 2 )
+       if (num_online_cpus() < 2)
                return;
 
-       for ( msg = 0; msg < 4; ++msg )
-               if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
-                       smp_message_recv( msg, regs );
+       for (msg = 0; msg < 4; msg++)
+               if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
+                       smp_message_recv(msg, regs);
 }
 
 static inline void smp_iSeries_do_message(int cpu, int msg)
@@ -74,48 +74,22 @@ static void smp_iSeries_message_pass(int target, int msg)
                smp_iSeries_do_message(target, msg);
        else {
                for_each_online_cpu(i) {
-                       if (target == MSG_ALL_BUT_SELF
-                           && i == smp_processor_id())
+                       if ((target == MSG_ALL_BUT_SELF) &&
+                                       (i == smp_processor_id()))
                                continue;
                        smp_iSeries_do_message(i, msg);
                }
        }
 }
 
-static int smp_iSeries_numProcs(void)
-{
-       unsigned np, i;
-
-       np = 0;
-        for (i=0; i < NR_CPUS; ++i) {
-                if (paca[i].lppaca.dyn_proc_status < 2) {
-                       cpu_set(i, cpu_possible_map);
-                       cpu_set(i, cpu_present_map);
-                       cpu_set(i, cpu_sibling_map[i]);
-                        ++np;
-                }
-        }
-       return np;
-}
-
 static int smp_iSeries_probe(void)
 {
-       unsigned i;
-       unsigned np = 0;
-
-       for (i=0; i < NR_CPUS; ++i) {
-               if (paca[i].lppaca.dyn_proc_status < 2) {
-                       /*paca[i].active = 1;*/
-                       ++np;
-               }
-       }
-
-       return np;
+       return cpus_weight(cpu_possible_map);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
 {
-       BUG_ON(nr < 0 || nr >= NR_CPUS);
+       BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
        /* Verify that our partition has a processor nr */
        if (paca[nr].lppaca.dyn_proc_status >= 2)
@@ -144,6 +118,4 @@ static struct smp_ops_t iSeries_smp_ops = {
 void __init smp_init_iSeries(void)
 {
        smp_ops = &iSeries_smp_ops;
-       systemcfg->processorCount       = smp_iSeries_numProcs();
 }
-
similarity index 93%
rename from include/asm-ppc64/iSeries/ItSpCommArea.h
rename to arch/powerpc/platforms/iseries/spcomm_area.h
index 5535f82..6e3b685 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItSpCommArea.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -17,8 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
-#ifndef _ITSPCOMMAREA_H
-#define _ITSPCOMMAREA_H
+#ifndef _ISERIES_SPCOMM_AREA_H
+#define _ISERIES_SPCOMM_AREA_H
 
 
 struct SpCommArea {
@@ -34,4 +33,4 @@ struct SpCommArea {
 
 extern struct SpCommArea xSpCommArea;
 
-#endif /* _ITSPCOMMAREA_H */
+#endif /* _ISERIES_SPCOMM_AREA_H */
similarity index 99%
rename from arch/ppc64/kernel/iSeries_vio.c
rename to arch/powerpc/platforms/iseries/vio.c
index 6b754b0..c0f7d2e 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm/vio.h>
 #include <asm/iommu.h>
+#include <asm/tce.h>
 #include <asm/abs_addr.h>
 #include <asm/page.h>
 #include <asm/iSeries/vio.h>
similarity index 99%
rename from arch/ppc64/kernel/viopath.c
rename to arch/powerpc/platforms/iseries/viopath.c
index 2a6c4f0..c0c767b 100644 (file)
@@ -1,5 +1,4 @@
 /* -*- linux-c -*-
- *  arch/ppc64/kernel/viopath.c
  *
  *  iSeries Virtual I/O Message Path code
  *
@@ -7,7 +6,7 @@
  *           Ryan Arnold <ryanarn@us.ibm.com>
  *           Colin Devilbiss <devilbis@us.ibm.com>
  *
- * (C) Copyright 2000-2003 IBM Corporation
+ * (C) Copyright 2000-2005 IBM Corporation
  *
  * This code is used by the iSeries virtual disk, cd,
  * tape, and console to communicate with OS/400 in another
similarity index 97%
rename from include/asm-ppc64/iSeries/ItVpdAreas.h
rename to arch/powerpc/platforms/iseries/vpd_areas.h
index 71b3ad2..601e6dd 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItVpdAreas.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _ITVPDAREAS_H
-#define _ITVPDAREAS_H
+#ifndef _ISERIES_VPD_AREAS_H
+#define _ISERIES_VPD_AREAS_H
 
 /*
  * This file defines the address and length of all of the VPD area passed to
@@ -86,4 +85,4 @@ struct ItVpdAreas {
 
 extern struct ItVpdAreas       itVpdAreas;
 
-#endif /* _ITVPDAREAS_H */
+#endif /* _ISERIES_VPD_AREAS_H */
similarity index 95%
rename from arch/ppc64/kernel/iSeries_VpdInfo.c
rename to arch/powerpc/platforms/iseries/vpdinfo.c
index 5d92179..9c31884 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb  2 2001.
- *
  * This code gets the card location of the hardware
  * Copyright (C) 2001  <Allan H Trautman> <IBM Corp>
  * Copyright (C) 2005  Stephen Rothwel, IBM Corp
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+
 #include <asm/types.h>
 #include <asm/resource.h>
-
-#include <asm/iSeries/HvCallPci.h>
+#include <asm/abs_addr.h>
+#include <asm/pci-bridge.h>
 #include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/iSeries_pci.h>
+
+#include "pci.h"
+#include "call_pci.h"
 
 /*
  * Size of Bus VPD data
@@ -214,7 +215,7 @@ static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
                printk("PCI: Bus VPD Buffer allocation failure.\n");
                return;
        }
-       BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr),
+       BusVpdLen = HvCallPci_getBusVpd(bus, iseries_hv_addr(BusVpdPtr),
                                        BUS_VPDSIZE);
        if (BusVpdLen == 0) {
                printk("PCI: Bus VPD Buffer zero length.\n");
@@ -242,7 +243,8 @@ out_free:
  */
 void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
 {
-       struct iSeries_Device_Node *DevNode = PciDev->sysdata;
+       struct device_node *DevNode = PciDev->sysdata;
+       struct pci_dn *pdn;
        u16 bus;
        u8 frame;
        char card[4];
@@ -255,8 +257,9 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
                return;
        }
 
-       bus = ISERIES_BUS(DevNode);
-       subbus = ISERIES_SUBBUS(DevNode);
+       pdn = PCI_DN(DevNode);
+       bus = pdn->busno;
+       subbus = pdn->bussubno;
        agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
                        ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
        iSeries_Get_Location_Code(bus, agent, &frame, card);
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
new file mode 100644 (file)
index 0000000..4369676
--- /dev/null
@@ -0,0 +1,8 @@
+obj-y                          += pic.o setup.o time.o feature.o pci.o \
+                                  sleep.o low_i2c.o cache.o
+obj-$(CONFIG_PMAC_BACKLIGHT)   += backlight.o
+obj-$(CONFIG_CPU_FREQ_PMAC)    += cpufreq.o
+obj-$(CONFIG_NVRAM)            += nvram.o
+# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
+obj-$(CONFIG_PPC64)            += nvram.o
+obj-$(CONFIG_SMP)              += smp.o
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
new file mode 100644 (file)
index 0000000..8be2f7d
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Miscellaneous procedures for dealing with the PowerMac hardware.
+ * Contains support for the backlight.
+ *
+ *   Copyright (C) 2000 Benjamin Herrenschmidt
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/reboot.h>
+#include <linux/nvram.h>
+#include <linux/console.h>
+#include <asm/sections.h>
+#include <asm/ptrace.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/nvram.h>
+#include <asm/backlight.h>
+
+#include <linux/adb.h>
+#include <linux/pmu.h>
+
+static struct backlight_controller *backlighter;
+static void* backlighter_data;
+static int backlight_autosave;
+static int backlight_level = BACKLIGHT_MAX;
+static int backlight_enabled = 1;
+static int backlight_req_level = -1;
+static int backlight_req_enable = -1;
+
+static void backlight_callback(void *);
+static DECLARE_WORK(backlight_work, backlight_callback, NULL);
+
+void register_backlight_controller(struct backlight_controller *ctrler,
+                                         void *data, char *type)
+{
+       struct device_node* bk_node;
+       char *prop;
+       int valid = 0;
+
+       /* There's already a matching controller, bail out */
+       if (backlighter != NULL)
+               return;
+
+       bk_node = find_devices("backlight");
+
+#ifdef CONFIG_ADB_PMU
+       /* Special case for the old PowerBook since I can't test on it */
+       backlight_autosave = machine_is_compatible("AAPL,3400/2400")
+               || machine_is_compatible("AAPL,3500");
+       if ((backlight_autosave
+            || machine_is_compatible("AAPL,PowerBook1998")
+            || machine_is_compatible("PowerBook1,1"))
+           && !strcmp(type, "pmu"))
+               valid = 1;
+#endif
+       if (bk_node) {
+               prop = get_property(bk_node, "backlight-control", NULL);
+               if (prop && !strncmp(prop, type, strlen(type)))
+                       valid = 1;
+       }
+       if (!valid)
+               return;
+       backlighter = ctrler;
+       backlighter_data = data;
+
+       if (bk_node && !backlight_autosave)
+               prop = get_property(bk_node, "bklt", NULL);
+       else
+               prop = NULL;
+       if (prop) {
+               backlight_level = ((*prop)+1) >> 1;
+               if (backlight_level > BACKLIGHT_MAX)
+                       backlight_level = BACKLIGHT_MAX;
+       }
+
+#ifdef CONFIG_ADB_PMU
+       if (backlight_autosave) {
+               struct adb_request req;
+               pmu_request(&req, NULL, 2, 0xd9, 0);
+               while (!req.complete)
+                       pmu_poll();
+               backlight_level = req.reply[0] >> 4;
+       }
+#endif
+       acquire_console_sem();
+       if (!backlighter->set_enable(1, backlight_level, data))
+               backlight_enabled = 1;
+       release_console_sem();
+
+       printk(KERN_INFO "Registered \"%s\" backlight controller,"
+              "level: %d/15\n", type, backlight_level);
+}
+EXPORT_SYMBOL(register_backlight_controller);
+
+void unregister_backlight_controller(struct backlight_controller
+                                           *ctrler, void *data)
+{
+       /* We keep the current backlight level (for now) */
+       if (ctrler == backlighter && data == backlighter_data)
+               backlighter = NULL;
+}
+EXPORT_SYMBOL(unregister_backlight_controller);
+
+static int __set_backlight_enable(int enable)
+{
+       int rc;
+
+       if (!backlighter)
+               return -ENODEV;
+       acquire_console_sem();
+       rc = backlighter->set_enable(enable, backlight_level,
+                                    backlighter_data);
+       if (!rc)
+               backlight_enabled = enable;
+       release_console_sem();
+       return rc;
+}
+int set_backlight_enable(int enable)
+{
+       if (!backlighter)
+               return -ENODEV;
+       backlight_req_enable = enable;
+       schedule_work(&backlight_work);
+       return 0;
+}
+
+EXPORT_SYMBOL(set_backlight_enable);
+
+int get_backlight_enable(void)
+{
+       if (!backlighter)
+               return -ENODEV;
+       return backlight_enabled;
+}
+EXPORT_SYMBOL(get_backlight_enable);
+
+static int __set_backlight_level(int level)
+{
+       int rc = 0;
+
+       if (!backlighter)
+               return -ENODEV;
+       if (level < BACKLIGHT_MIN)
+               level = BACKLIGHT_OFF;
+       if (level > BACKLIGHT_MAX)
+               level = BACKLIGHT_MAX;
+       acquire_console_sem();
+       if (backlight_enabled)
+               rc = backlighter->set_level(level, backlighter_data);
+       if (!rc)
+               backlight_level = level;
+       release_console_sem();
+       if (!rc && !backlight_autosave) {
+               level <<=1;
+               if (level & 0x10)
+                       level |= 0x01;
+               // -- todo: save to property "bklt"
+       }
+       return rc;
+}
+int set_backlight_level(int level)
+{
+       if (!backlighter)
+               return -ENODEV;
+       backlight_req_level = level;
+       schedule_work(&backlight_work);
+       return 0;
+}
+
+EXPORT_SYMBOL(set_backlight_level);
+
+int get_backlight_level(void)
+{
+       if (!backlighter)
+               return -ENODEV;
+       return backlight_level;
+}
+EXPORT_SYMBOL(get_backlight_level);
+
+static void backlight_callback(void *dummy)
+{
+       int level, enable;
+
+       do {
+               level = backlight_req_level;
+               enable = backlight_req_enable;
+               mb();
+
+               if (level >= 0)
+                       __set_backlight_level(level);
+               if (enable >= 0)
+                       __set_backlight_enable(enable);
+       } while(cmpxchg(&backlight_req_level, level, -1) != level ||
+               cmpxchg(&backlight_req_enable, enable, -1) != enable);
+}
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
new file mode 100644 (file)
index 0000000..fb977de
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * This file contains low-level cache management functions
+ * used for sleep and CPU speed changes on Apple machines.
+ * (In fact the only thing that is Apple-specific is that we assume
+ * that we can read from ROM at physical address 0xfff00000.)
+ *
+ *    Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
+ *                       Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/cputable.h>
+
+/*
+ * Flush and disable all data caches (dL1, L2, L3). This is used
+ * when going to sleep, when doing a PMU based cpufreq transition,
+ * or when "offlining" a CPU on SMP machines. This code is over
+ * paranoid, but I've had enough issues with various CPU revs and
+ * bugs that I decided it was worth beeing over cautious
+ */
+
+_GLOBAL(flush_disable_caches)
+#ifndef CONFIG_6xx
+       blr
+#else
+BEGIN_FTR_SECTION
+       b       flush_disable_745x
+END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
+BEGIN_FTR_SECTION
+       b       flush_disable_75x
+END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
+       b       __flush_disable_L1
+
+/* This is the code for G3 and 74[01]0 */
+flush_disable_75x:
+       mflr    r10
+
+       /* Turn off EE and DR in MSR */
+       mfmsr   r11
+       rlwinm  r0,r11,0,~MSR_EE
+       rlwinm  r0,r0,0,~MSR_DR
+       sync
+       mtmsr   r0
+       isync
+
+       /* Stop DST streams */
+BEGIN_FTR_SECTION
+       DSSALL
+       sync
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+
+       /* Stop DPM */
+       mfspr   r8,SPRN_HID0            /* Save SPRN_HID0 in r8 */
+       rlwinm  r4,r8,0,12,10           /* Turn off HID0[DPM] */
+       sync
+       mtspr   SPRN_HID0,r4            /* Disable DPM */
+       sync
+
+       /* Disp-flush L1. We have a weird problem here that I never
+        * totally figured out. On 750FX, using the ROM for the flush
+        * results in a non-working flush. We use that workaround for
+        * now until I finally understand what's going on. --BenH
+        */
+
+       /* ROM base by default */
+       lis     r4,0xfff0
+       mfpvr   r3
+       srwi    r3,r3,16
+       cmplwi  cr0,r3,0x7000
+       bne+    1f
+       /* RAM base on 750FX */
+       li      r4,0
+1:     li      r4,0x4000
+       mtctr   r4
+1:     lwz     r0,0(r4)
+       addi    r4,r4,32
+       bdnz    1b
+       sync
+       isync
+
+       /* Disable / invalidate / enable L1 data */
+       mfspr   r3,SPRN_HID0
+       rlwinm  r3,r3,0,~(HID0_DCE | HID0_ICE)
+       mtspr   SPRN_HID0,r3
+       sync
+       isync
+       ori     r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
+       sync
+       isync
+       mtspr   SPRN_HID0,r3
+       xori    r3,r3,(HID0_DCI|HID0_ICFI)
+       mtspr   SPRN_HID0,r3
+       sync
+
+       /* Get the current enable bit of the L2CR into r4 */
+       mfspr   r5,SPRN_L2CR
+       /* Set to data-only (pre-745x bit) */
+       oris    r3,r5,L2CR_L2DO@h
+       b       2f
+       /* When disabling L2, code must be in L1 */
+       .balign 32
+1:     mtspr   SPRN_L2CR,r3
+3:     sync
+       isync
+       b       1f
+2:     b       3f
+3:     sync
+       isync
+       b       1b
+1:     /* disp-flush L2. The interesting thing here is that the L2 can be
+        * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
+        * but that is probbaly fine. We disp-flush over 4Mb to be safe
+        */
+       lis     r4,2
+       mtctr   r4
+       lis     r4,0xfff0
+1:     lwz     r0,0(r4)
+       addi    r4,r4,32
+       bdnz    1b
+       sync
+       isync
+       lis     r4,2
+       mtctr   r4
+       lis     r4,0xfff0
+1:     dcbf    0,r4
+       addi    r4,r4,32
+       bdnz    1b
+       sync
+       isync
+
+       /* now disable L2 */
+       rlwinm  r5,r5,0,~L2CR_L2E
+       b       2f
+       /* When disabling L2, code must be in L1 */
+       .balign 32
+1:     mtspr   SPRN_L2CR,r5
+3:     sync
+       isync
+       b       1f
+2:     b       3f
+3:     sync
+       isync
+       b       1b
+1:     sync
+       isync
+       /* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
+       oris    r4,r5,L2CR_L2I@h
+       mtspr   SPRN_L2CR,r4
+       sync
+       isync
+
+       /* Wait for the invalidation to complete */
+1:     mfspr   r3,SPRN_L2CR
+       rlwinm. r0,r3,0,31,31
+       bne     1b
+
+       /* Clear L2I */
+       xoris   r4,r4,L2CR_L2I@h
+       sync
+       mtspr   SPRN_L2CR,r4
+       sync
+
+       /* now disable the L1 data cache */
+       mfspr   r0,SPRN_HID0
+       rlwinm  r0,r0,0,~(HID0_DCE|HID0_ICE)
+       mtspr   SPRN_HID0,r0
+       sync
+       isync
+
+       /* Restore HID0[DPM] to whatever it was before */
+       sync
+       mfspr   r0,SPRN_HID0
+       rlwimi  r0,r8,0,11,11           /* Turn back HID0[DPM] */
+       mtspr   SPRN_HID0,r0
+       sync
+
+       /* restore DR and EE */
+       sync
+       mtmsr   r11
+       isync
+
+       mtlr    r10
+       blr
+
+/* This code is for 745x processors */
+flush_disable_745x:
+       /* Turn off EE and DR in MSR */
+       mfmsr   r11
+       rlwinm  r0,r11,0,~MSR_EE
+       rlwinm  r0,r0,0,~MSR_DR
+       sync
+       mtmsr   r0
+       isync
+
+       /* Stop prefetch streams */
+       DSSALL
+       sync
+
+       /* Disable L2 prefetching */
+       mfspr   r0,SPRN_MSSCR0
+       rlwinm  r0,r0,0,0,29
+       mtspr   SPRN_MSSCR0,r0
+       sync
+       isync
+       lis     r4,0
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+       dcbf    0,r4
+
+       /* Due to a bug with the HW flush on some CPU revs, we occasionally
+        * experience data corruption. I'm adding a displacement flush along
+        * with a dcbf loop over a few Mb to "help". The problem isn't totally
+        * fixed by this in theory, but at least, in practice, I couldn't reproduce
+        * it even with a big hammer...
+        */
+
+        lis     r4,0x0002
+        mtctr   r4
+       li      r4,0
+1:
+        lwz     r0,0(r4)
+        addi    r4,r4,32                /* Go to start of next cache line */
+        bdnz    1b
+        isync
+
+        /* Now, flush the first 4MB of memory */
+        lis     r4,0x0002
+        mtctr   r4
+       li      r4,0
+        sync
+1:
+        dcbf    0,r4
+        addi    r4,r4,32                /* Go to start of next cache line */
+        bdnz    1b
+
+       /* Flush and disable the L1 data cache */
+       mfspr   r6,SPRN_LDSTCR
+       lis     r3,0xfff0       /* read from ROM for displacement flush */
+       li      r4,0xfe         /* start with only way 0 unlocked */
+       li      r5,128          /* 128 lines in each way */
+1:     mtctr   r5
+       rlwimi  r6,r4,0,24,31
+       mtspr   SPRN_LDSTCR,r6
+       sync
+       isync
+2:     lwz     r0,0(r3)        /* touch each cache line */
+       addi    r3,r3,32
+       bdnz    2b
+       rlwinm  r4,r4,1,24,30   /* move on to the next way */
+       ori     r4,r4,1
+       cmpwi   r4,0xff         /* all done? */
+       bne     1b
+       /* now unlock the L1 data cache */
+       li      r4,0
+       rlwimi  r6,r4,0,24,31
+       sync
+       mtspr   SPRN_LDSTCR,r6
+       sync
+       isync
+
+       /* Flush the L2 cache using the hardware assist */
+       mfspr   r3,SPRN_L2CR
+       cmpwi   r3,0            /* check if it is enabled first */
+       bge     4f
+       oris    r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
+       b       2f
+       /* When disabling/locking L2, code must be in L1 */
+       .balign 32
+1:     mtspr   SPRN_L2CR,r0    /* lock the L2 cache */
+3:     sync
+       isync
+       b       1f
+2:     b       3f
+3:     sync
+       isync
+       b       1b
+1:     sync
+       isync
+       ori     r0,r3,L2CR_L2HWF_745x
+       sync
+       mtspr   SPRN_L2CR,r0    /* set the hardware flush bit */
+3:     mfspr   r0,SPRN_L2CR    /* wait for it to go to 0 */
+       andi.   r0,r0,L2CR_L2HWF_745x
+       bne     3b
+       sync
+       rlwinm  r3,r3,0,~L2CR_L2E
+       b       2f
+       /* When disabling L2, code must be in L1 */
+       .balign 32
+1:     mtspr   SPRN_L2CR,r3    /* disable the L2 cache */
+3:     sync
+       isync
+       b       1f
+2:     b       3f
+3:     sync
+       isync
+       b       1b
+1:     sync
+       isync
+       oris    r4,r3,L2CR_L2I@h
+       mtspr   SPRN_L2CR,r4
+       sync
+       isync
+1:     mfspr   r4,SPRN_L2CR
+       andis.  r0,r4,L2CR_L2I@h
+       bne     1b
+       sync
+
+BEGIN_FTR_SECTION
+       /* Flush the L3 cache using the hardware assist */
+4:     mfspr   r3,SPRN_L3CR
+       cmpwi   r3,0            /* check if it is enabled */
+       bge     6f
+       oris    r0,r3,L3CR_L3IO@h
+       ori     r0,r0,L3CR_L3DO
+       sync
+       mtspr   SPRN_L3CR,r0    /* lock the L3 cache */
+       sync
+       isync
+       ori     r0,r0,L3CR_L3HWF
+       sync
+       mtspr   SPRN_L3CR,r0    /* set the hardware flush bit */
+5:     mfspr   r0,SPRN_L3CR    /* wait for it to go to zero */
+       andi.   r0,r0,L3CR_L3HWF
+       bne     5b
+       rlwinm  r3,r3,0,~L3CR_L3E
+       sync
+       mtspr   SPRN_L3CR,r3    /* disable the L3 cache */
+       sync
+       ori     r4,r3,L3CR_L3I
+       mtspr   SPRN_L3CR,r4
+1:     mfspr   r4,SPRN_L3CR
+       andi.   r0,r4,L3CR_L3I
+       bne     1b
+       sync
+END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
+
+6:     mfspr   r0,SPRN_HID0    /* now disable the L1 data cache */
+       rlwinm  r0,r0,0,~HID0_DCE
+       mtspr   SPRN_HID0,r0
+       sync
+       isync
+       mtmsr   r11             /* restore DR and EE */
+       isync
+       blr
+#endif /* CONFIG_6xx */
diff --git a/arch/powerpc/platforms/powermac/cpufreq.c b/arch/powerpc/platforms/powermac/cpufreq.c
new file mode 100644 (file)
index 0000000..c47f8b6
--- /dev/null
@@ -0,0 +1,726 @@
+/*
+ *  arch/ppc/platforms/pmac_cpufreq.c
+ *
+ *  Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *  Copyright (C) 2004        John Steele Scott <toojays@toojays.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: Need a big cleanup here. Basically, we need to have different
+ * cpufreq_driver structures for the different type of HW instead of the
+ * current mess. We also need to better deal with the detection of the
+ * type of machine.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/i2c.h>
+#include <linux/hardirq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/system.h>
+#include <asm/mpic.h>
+#include <asm/keylargo.h>
+
+/* WARNING !!! This will cause calibrate_delay() to be called,
+ * but this is an __init function ! So you MUST go edit
+ * init/main.c to make it non-init before enabling DEBUG_FREQ
+ */
+#undef DEBUG_FREQ
+
+/*
+ * There is a problem with the core cpufreq code on SMP kernels,
+ * it won't recalculate the Bogomips properly
+ */
+#ifdef CONFIG_SMP
+#warning "WARNING, CPUFREQ not recommended on SMP kernels"
+#endif
+
+extern void low_choose_7447a_dfs(int dfs);
+extern void low_choose_750fx_pll(int pll);
+extern void low_sleep_handler(void);
+
+/*
+ * Currently, PowerMac cpufreq supports only high & low frequencies
+ * that are set by the firmware
+ */
+static unsigned int low_freq;
+static unsigned int hi_freq;
+static unsigned int cur_freq;
+static unsigned int sleep_freq;
+
+/*
+ * Different models use different mechanisms to switch the frequency
+ */
+static int (*set_speed_proc)(int low_speed);
+static unsigned int (*get_speed_proc)(void);
+
+/*
+ * Some definitions used by the various speedprocs
+ */
+static u32 voltage_gpio;
+static u32 frequency_gpio;
+static u32 slew_done_gpio;
+static int no_schedule;
+static int has_cpu_l2lve;
+static int is_pmu_based;
+
+/* There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+#define CPUFREQ_HIGH                  0
+#define CPUFREQ_LOW                   1
+
+static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
+       {CPUFREQ_HIGH,          0},
+       {CPUFREQ_LOW,           0},
+       {0,                     CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr* pmac_cpu_freqs_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+};
+
+/* Wait for 'ms' milliseconds.  Busy-waits with mdelay() while no_schedule
+ * is set (during suspend/resume, when we must not sleep), otherwise
+ * sleeps with msleep().
+ */
+static inline void local_delay(unsigned long ms)
+{
+       if (no_schedule)
+               mdelay(ms);
+       else
+               msleep(ms);
+}
+
+#ifdef DEBUG_FREQ
+/* Recompute and print bogomips after a speed switch (DEBUG_FREQ only). */
+static inline void debug_calc_bogomips(void)
+{
+       /* This will cause a recalc of bogomips and display the
+        * result. We backup/restore the value to avoid affecting the
+        * core cpufreq framework's own calculation.
+        */
+       extern void calibrate_delay(void);
+
+       unsigned long save_lpj = loops_per_jiffy;
+       calibrate_delay();
+       loops_per_jiffy = save_lpj;
+}
+#endif /* DEBUG_FREQ */
+
+/* Switch CPU speed under 750FX CPU control.
+ *
+ * low_speed: 0 = switch to high speed, 1 = switch to low speed.
+ * The voltage GPIO is raised (0x05) before ramping up and lowered (0x04)
+ * after ramping down.  When the CPU supports an L2 low-voltage mode
+ * (has_cpu_l2lve), bit 0x2000 of HID2 is cleared for high voltage and
+ * set for low voltage (bit meaning assumed from usage — verify against
+ * the 750FX manual).  Always returns 0.
+ */
+static int cpu_750fx_cpu_speed(int low_speed)
+{
+       u32 hid2;
+
+       if (low_speed == 0) {
+               /* ramping up, set voltage first */
+               pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+               /* Make sure we sleep for at least 1ms */
+               local_delay(10);
+
+               /* tweak L2 for high voltage */
+               if (has_cpu_l2lve) {
+                       hid2 = mfspr(SPRN_HID2);
+                       hid2 &= ~0x2000;
+                       mtspr(SPRN_HID2, hid2);
+               }
+       }
+#ifdef CONFIG_6xx
+       low_choose_750fx_pll(low_speed);
+#endif
+       if (low_speed == 1) {
+               /* tweak L2 for low voltage */
+               if (has_cpu_l2lve) {
+                       hid2 = mfspr(SPRN_HID2);
+                       hid2 |= 0x2000;
+                       mtspr(SPRN_HID2, hid2);
+               }
+
+               /* ramping down, set voltage last */
+               pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+               local_delay(10);
+       }
+
+       return 0;
+}
+
+/* Report the current 750FX speed from the HID1 PLL-select (PS) bit. */
+static unsigned int cpu_750fx_get_cpu_speed(void)
+{
+       if (mfspr(SPRN_HID1) & HID1_PS)
+               return low_freq;
+       else
+               return hi_freq;
+}
+
+/* Switch CPU speed using the 7447A Dynamic Frequency Switching (DFS)
+ * facility.  low_speed: 0 = high, 1 = low.  The voltage GPIO is raised
+ * (0x05) before speeding up and lowered (0x04) after slowing down.
+ * Always returns 0.
+ */
+static int dfs_set_cpu_speed(int low_speed)
+{
+       if (low_speed == 0) {
+               /* ramping up, set voltage first */
+               pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+               /* Make sure we sleep for at least 1ms */
+               local_delay(1);
+       }
+
+       /* set frequency */
+#ifdef CONFIG_6xx
+       low_choose_7447a_dfs(low_speed);
+#endif
+       udelay(100);
+
+       if (low_speed == 1) {
+               /* ramping down, set voltage last */
+               pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+               local_delay(1);
+       }
+
+       return 0;
+}
+
+/* Report the current speed from the HID1 DFS divide-by-two bit. */
+static unsigned int dfs_get_cpu_speed(void)
+{
+       if (mfspr(SPRN_HID1) & HID1_DFS)
+               return low_freq;
+       else
+               return hi_freq;
+}
+
+
+/* Switch CPU speed using slewing GPIOs.
+ *
+ * Reads the frequency GPIO first and skips the switch if we are already
+ * at the requested speed (bit 0 clear appears to mean low speed).  After
+ * writing the new frequency, polls the slew-done GPIO (bit 0x02) with a
+ * ~100ms timeout.  Voltage is raised before ramping up and lowered after
+ * ramping down, as in the other speedprocs.  Always returns 0, even on
+ * slew timeout.
+ */
+static int gpios_set_cpu_speed(int low_speed)
+{
+       int gpio, timeout = 0;
+
+       /* If ramping up, set voltage first */
+       if (low_speed == 0) {
+               pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+               /* Delay is way too big but it's ok, we schedule */
+               local_delay(10);
+       }
+
+       /* Set frequency */
+       gpio =  pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+       if (low_speed == ((gpio & 0x01) == 0))
+               goto skip;
+
+       pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
+                         low_speed ? 0x04 : 0x05);
+       udelay(200);
+       do {
+               if (++timeout > 100)
+                       break;
+               local_delay(1);
+               gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
+       } while((gpio & 0x02) == 0);
+ skip:
+       /* If ramping down, set voltage last */
+       if (low_speed == 1) {
+               pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+               /* Delay is way too big but it's ok, we schedule */
+               local_delay(10);
+       }
+
+#ifdef DEBUG_FREQ
+       debug_calc_bogomips();
+#endif
+
+       return 0;
+}
+
+/* Switch CPU speed under PMU control.
+ *
+ * This is the heavyweight path: the new speed takes effect when the CPU
+ * goes through a sleep/reset cycle, so we quiesce the whole machine
+ * (PMU, interrupt controller, decrementer, FPU/AltiVec, L2/L3 caches),
+ * send the PMU_CPU_SPEED command, run low_sleep_handler() to save CPU
+ * state and recover from the hardware reset, then restore everything.
+ * Runs with preemption disabled; always returns 0.
+ */
+static int pmu_set_cpu_speed(int low_speed)
+{
+       struct adb_request req;
+       unsigned long save_l2cr;
+       unsigned long save_l3cr;
+       unsigned int pic_prio;
+       unsigned long flags;
+
+       preempt_disable();
+
+#ifdef DEBUG_FREQ
+       printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+#endif
+       pmu_suspend();
+
+       /* Disable all interrupt sources on openpic */
+       pic_prio = mpic_cpu_get_priority();
+       mpic_cpu_set_priority(0xf);
+
+       /* Make sure the decrementer won't interrupt us */
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       /* Make sure any pending DEC interrupt occurring while we did
+        * the above didn't re-enable the DEC */
+       mb();
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
+       /* We can now disable MSR_EE */
+       local_irq_save(flags);
+
+       /* Giveup the FPU & vec */
+       enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+       /* Save & disable L2 and L3 caches */
+       save_l3cr = _get_L3CR();        /* (returns -1 if not available) */
+       save_l2cr = _get_L2CR();        /* (returns -1 if not available) */
+
+       /* Send the new speed command. My assumption is that this command
+        * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
+        */
+       pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
+       while (!req.complete)
+               pmu_poll();
+
+       /* Prepare the northbridge for the speed transition */
+       pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
+
+       /* Call low level code to backup CPU state and recover from
+        * hardware reset
+        */
+       low_sleep_handler();
+
+       /* Restore the northbridge */
+       pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
+
+       /* Restore L2 cache */
+       if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
+               _set_L2CR(save_l2cr);
+       /* Restore L3 cache */
+       if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
+               _set_L3CR(save_l3cr);
+
+       /* Restore userland MMU context */
+       set_context(current->active_mm->context, current->active_mm->pgd);
+
+#ifdef DEBUG_FREQ
+       printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
+#endif
+
+       /* Restore low level PMU operations */
+       pmu_unlock();
+
+       /* Restore decrementer */
+       wakeup_decrementer();
+
+       /* Restore interrupts */
+       mpic_cpu_set_priority(pic_prio);
+
+       /* Let interrupts flow again ... */
+       local_irq_restore(flags);
+
+#ifdef DEBUG_FREQ
+       debug_calc_bogomips();
+#endif
+
+       pmu_resume();
+
+       preempt_enable();
+
+       return 0;
+}
+
+/* Core speed-change routine.  Sends cpufreq PRE/POSTCHANGE notifications
+ * when 'notify' is set, calls the board-specific set_speed_proc(), and
+ * works around the L3 cache: it is disabled before dropping to low speed
+ * and the saved L3CR is restored after returning to high speed.
+ * Always returns 0 (including when already at the requested speed).
+ */
+static int do_set_cpu_speed(int speed_mode, int notify)
+{
+       struct cpufreq_freqs freqs;
+       unsigned long l3cr;
+       static unsigned long prev_l3cr;
+
+       freqs.old = cur_freq;
+       freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+       freqs.cpu = smp_processor_id();
+
+       if (freqs.old == freqs.new)
+               return 0;
+
+       if (notify)
+               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+       if (speed_mode == CPUFREQ_LOW &&
+           cpu_has_feature(CPU_FTR_L3CR)) {
+               l3cr = _get_L3CR();
+               if (l3cr & L3CR_L3E) {
+                       prev_l3cr = l3cr;
+                       _set_L3CR(0);
+               }
+       }
+       set_speed_proc(speed_mode == CPUFREQ_LOW);
+       if (speed_mode == CPUFREQ_HIGH &&
+           cpu_has_feature(CPU_FTR_L3CR)) {
+               l3cr = _get_L3CR();
+               if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
+                       _set_L3CR(prev_l3cr);
+       }
+       if (notify)
+               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+       cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+
+       return 0;
+}
+
+/* cpufreq .get callback: report the cached current frequency (kHz). */
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
+{
+       return cur_freq;
+}
+
+/* cpufreq .verify callback: clamp the policy to our two-entry table. */
+static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
+{
+       return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
+}
+
+/* cpufreq .target callback: map the requested frequency to HIGH/LOW via
+ * the frequency table and perform the switch (with notifications).
+ */
+static int pmac_cpufreq_target(        struct cpufreq_policy *policy,
+                                       unsigned int target_freq,
+                                       unsigned int relation)
+{
+       unsigned int    newstate = 0;
+
+       if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
+                       target_freq, relation, &newstate))
+               return -EINVAL;
+
+       return do_set_cpu_speed(newstate, 1);
+}
+
+/* Exported helper: current frequency of CPU 'i' in kHz (0 for unknown
+ * CPUs; only CPU 0 is supported for now).
+ */
+unsigned int pmac_get_one_cpufreq(int i)
+{
+       /* Supports only one CPU for now */
+       return (i == 0) ? cur_freq : 0;
+}
+
+/* cpufreq .init callback: only CPU 0 is supported.  Seeds the policy
+ * with the default governor, an unknown (eternal) transition latency
+ * and the cached current frequency, then registers the freq table.
+ */
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+       if (policy->cpu != 0)
+               return -ENODEV;
+
+       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+       policy->cpuinfo.transition_latency      = CPUFREQ_ETERNAL;
+       policy->cur = cur_freq;
+
+       cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
+       return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+}
+
+/* Extract a GPIO address from a device node's "reg" property, normalized
+ * into the KeyLargo GPIO space.  Returns 0 when the property is missing.
+ */
+static u32 read_gpio(struct device_node *np)
+{
+       u32 *reg = (u32 *)get_property(np, "reg", NULL);
+       u32 offset;
+
+       if (reg == NULL)
+               return 0;
+       /* That works for all keylargos but shall be fixed properly
+        * some day... The problem is that it seems we can't rely
+        * on the "reg" property of the GPIO nodes, they are either
+        * relative to the base of KeyLargo or to the base of the
+        * GPIO space, and the device-tree doesn't help.
+        */
+       offset = *reg;
+       if (offset < KEYLARGO_GPIO_LEVELS0)
+               offset += KEYLARGO_GPIO_LEVELS0;
+       return offset;
+}
+
+/* cpufreq .suspend callback: remember the pre-sleep speed and force high
+ * speed (except on PMU-based machines), with scheduling disabled in the
+ * delay helper from here until resume.
+ */
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+{
+       /* Ok, this could be made a bit smarter, but let's be robust for now. We
+        * always force a speed change to high speed before sleep, to make sure
+        * we have appropriate voltage and/or bus speed for the wakeup process,
+        * and to make sure our loops_per_jiffies are "good enough", that is will
+        * not cause too short delays if we sleep in low speed and wake in high
+        * speed..
+        */
+       no_schedule = 1;
+       sleep_freq = cur_freq;
+       if (cur_freq == low_freq && !is_pmu_based)
+               do_set_cpu_speed(CPUFREQ_HIGH, 0);
+       return 0;
+}
+
+/* cpufreq .resume callback: re-read the actual speed if the hardware can
+ * tell us, then force a switch back to the pre-sleep speed and re-enable
+ * scheduling in the delay helper.
+ */
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
+{
+       /* If we resume, first check if we have a get() function */
+       if (get_speed_proc)
+               cur_freq = get_speed_proc();
+       else
+               cur_freq = 0;
+
+       /* We don't, hrm... we don't really know our speed here, best
+        * is that we force a switch to whatever it was, which is
+        * probably high speed due to our suspend() routine
+        */
+       do_set_cpu_speed(sleep_freq == low_freq ?
+                        CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+
+       no_schedule = 0;
+       return 0;
+}
+
+/* The cpufreq driver descriptor registered by pmac_cpufreq_setup(). */
+static struct cpufreq_driver pmac_cpufreq_driver = {
+       .verify         = pmac_cpufreq_verify,
+       .target         = pmac_cpufreq_target,
+       .get            = pmac_cpufreq_get_speed,
+       .init           = pmac_cpufreq_cpu_init,
+       .suspend        = pmac_cpufreq_suspend,
+       .resume         = pmac_cpufreq_resume,
+       .flags          = CPUFREQ_PM_NO_WARN,
+       .attr           = pmac_cpu_freqs_attr,
+       .name           = "powermac",
+       .owner          = THIS_MODULE,
+};
+
+
+/* Probe a MacRISC3 machine for speed-switching support.
+ *
+ * First tries GPIO-based slewing (frequency + slew-done GPIOs present):
+ * speeds are derived from the "bus-frequencies" and
+ * "processor-to-bus-ratio*2" properties.  Otherwise falls back to the
+ * PMU method using min/max-clock-frequency properties.  Returns 0 when
+ * the PMU method was set up, 1 otherwise (including the successful GPIO
+ * path — the caller ignores the return value, so its exact semantics
+ * are unclear; verify before relying on it).
+ */
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+{
+       struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
+                                                               "voltage-gpio");
+       struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
+                                                               "frequency-gpio");
+       struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
+                                                                    "slewing-done");
+       u32 *value;
+
+       /*
+        * Check to see if it's GPIO driven or PMU only
+        *
+        * The way we extract the GPIO address is slightly hackish, but it
+        * works well enough for now. We need to abstract the whole GPIO
+        * stuff sooner or later anyway
+        */
+
+       if (volt_gpio_np)
+               voltage_gpio = read_gpio(volt_gpio_np);
+       if (freq_gpio_np)
+               frequency_gpio = read_gpio(freq_gpio_np);
+       if (slew_done_gpio_np)
+               slew_done_gpio = read_gpio(slew_done_gpio_np);
+
+       /* If we use the frequency GPIOs, calculate the min/max speeds based
+        * on the bus frequencies
+        */
+       if (frequency_gpio && slew_done_gpio) {
+               int lenp, rc;
+               u32 *freqs, *ratio;
+
+               freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp);
+               lenp /= sizeof(u32);
+               if (freqs == NULL || lenp != 2) {
+                       printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+                       return 1;
+               }
+               ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL);
+               if (ratio == NULL) {
+                       printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+                       return 1;
+               }
+
+               /* Get the min/max bus frequencies */
+               low_freq = min(freqs[0], freqs[1]);
+               hi_freq = max(freqs[0], freqs[1]);
+
+               /* Grrrr.. It _seems_ that the device-tree is lying on the low bus
+                * frequency, it claims it to be around 84Mhz on some models while
+                * it appears to be approx. 101Mhz on all. Let's hack around here...
+                * fortunately, we don't need to be too precise
+                */
+               if (low_freq < 98000000)
+                       low_freq = 101000000;
+                       
+               /* Convert those to CPU core clocks */
+               low_freq = (low_freq * (*ratio)) / 2000;
+               hi_freq = (hi_freq * (*ratio)) / 2000;
+
+               /* Now that we have the frequencies, read the GPIO to see what
+                * our current speed is
+                */
+               rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+               cur_freq = (rc & 0x01) ? hi_freq : low_freq;
+
+               set_speed_proc = gpios_set_cpu_speed;
+               return 1;
+       }
+
+       /* If we use the PMU, look for the min & max frequencies in the
+        * device-tree
+        */
+       value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL);
+       if (!value)
+               return 1;
+       low_freq = (*value) / 1000;
+       /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
+        * here */
+       if (low_freq < 100000)
+               low_freq *= 10;
+
+       value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL);
+       if (!value)
+               return 1;
+       hi_freq = (*value) / 1000;
+       set_speed_proc = pmu_set_cpu_speed;
+       is_pmu_based = 1;
+
+       return 0;
+}
+
+/* Probe a 7447A machine for DFS support.  Requires the cpu node's
+ * "dynamic-power-step" property and a "cpu-vcore-select" GPIO.  The
+ * firmware only reports the high frequency; low speed is half of it.
+ * Returns 0 on success, 1 when not applicable or the GPIO is missing.
+ */
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
+{
+       struct device_node *volt_gpio_np;
+
+       if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+               return 1;
+
+       volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+       if (volt_gpio_np)
+               voltage_gpio = read_gpio(volt_gpio_np);
+       if (!voltage_gpio){
+               printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+               return 1;
+       }
+
+       /* OF only reports the high frequency */
+       hi_freq = cur_freq;
+       low_freq = cur_freq/2;
+
+       /* Read actual frequency from CPU */
+       cur_freq = dfs_get_cpu_speed();
+       set_speed_proc = dfs_set_cpu_speed;
+       get_speed_proc = dfs_get_cpu_speed;
+
+       return 0;
+}
+
+/* Probe a 750FX machine for PLL-based switching.  Requires the cpu
+ * node's "dynamic-power-step" and "reduced-clock-frequency" properties;
+ * the L2 low-voltage tweak is enabled for all but PVR minor rev 0x1xx.
+ * Returns 0 on success, 1 when not applicable.
+ */
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
+{
+       struct device_node *volt_gpio_np;
+       u32 pvr, *value;
+
+       if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+               return 1;
+
+       hi_freq = cur_freq;
+       value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL);
+       if (!value)
+               return 1;
+       low_freq = (*value) / 1000;
+
+       volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+       if (volt_gpio_np)
+               voltage_gpio = read_gpio(volt_gpio_np);
+
+       pvr = mfspr(SPRN_PVR);
+       has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
+
+       set_speed_proc = cpu_750fx_cpu_speed;
+       get_speed_proc = cpu_750fx_get_cpu_speed;
+       cur_freq = cpu_750fx_get_cpu_speed();
+
+       return 0;
+}
+
+/* Currently, we support the following machines:
+ *
+ *  - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
+ *  - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
+ *  - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
+ *  - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
+ *  - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
+ *  - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
+ *  - Recent MacRISC3 laptops
+ *  - All new machines with 7447A CPUs
+ */
+/* Driver entry point: detect the machine model (see the list above),
+ * pick the matching speedproc and low/high frequencies, then register
+ * the cpufreq driver.  "nocpufreq" on the command line disables it.
+ * Returns 0 when disabled or registered, -ENODEV when unsupported.
+ */
+static int __init pmac_cpufreq_setup(void)
+{
+       struct device_node      *cpunode;
+       u32                     *value;
+
+       if (strstr(cmd_line, "nocpufreq"))
+               return 0;
+
+       /* Assume only one CPU */
+       cpunode = find_type_devices("cpu");
+       if (!cpunode)
+               goto out;
+
+       /* Get current cpu clock freq */
+       value = (u32 *)get_property(cpunode, "clock-frequency", NULL);
+       if (!value)
+               goto out;
+       cur_freq = (*value) / 1000;
+
+       /*  Check for 7447A based MacRISC3 */
+       if (machine_is_compatible("MacRISC3") &&
+           get_property(cpunode, "dynamic-power-step", NULL) &&
+           PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
+               pmac_cpufreq_init_7447A(cpunode);
+       /* Check for other MacRISC3 machines */
+       } else if (machine_is_compatible("PowerBook3,4") ||
+                  machine_is_compatible("PowerBook3,5") ||
+                  machine_is_compatible("MacRISC3")) {
+               pmac_cpufreq_init_MacRISC3(cpunode);
+       /* Else check for iBook2 500/600 */
+       } else if (machine_is_compatible("PowerBook4,1")) {
+               hi_freq = cur_freq;
+               low_freq = 400000;
+               set_speed_proc = pmu_set_cpu_speed;
+               is_pmu_based = 1;
+       }
+       /* Else check for TiPb 550 */
+       else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
+               hi_freq = cur_freq;
+               low_freq = 500000;
+               set_speed_proc = pmu_set_cpu_speed;
+               is_pmu_based = 1;
+       }
+       /* Else check for TiPb 400 & 500 */
+       else if (machine_is_compatible("PowerBook3,2")) {
+               /* We only know about the 400 MHz and the 500Mhz model
+                * they both have 300 MHz as low frequency
+                */
+               if (cur_freq < 350000 || cur_freq > 550000)
+                       goto out;
+               hi_freq = cur_freq;
+               low_freq = 300000;
+               set_speed_proc = pmu_set_cpu_speed;
+               is_pmu_based = 1;
+       }
+       /* Else check for 750FX */
+       else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
+               pmac_cpufreq_init_750FX(cpunode);
+out:
+       if (set_speed_proc == NULL)
+               return -ENODEV;
+
+       pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
+       pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
+
+       printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
+       printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+              low_freq/1000, hi_freq/1000, cur_freq/1000);
+
+       return cpufreq_register_driver(&pmac_cpufreq_driver);
+}
+
+module_init(pmac_cpufreq_setup);
+
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
new file mode 100644 (file)
index 0000000..10f1d94
--- /dev/null
@@ -0,0 +1,3063 @@
+/*
+ *  arch/ppc/platforms/pmac_feature.c
+ *
+ *  Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
+ *                          Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  TODO:
+ *
+ *   - Replace mdelay with some schedule loop if possible
+ *   - Shorten some obfuscated delays on some routines (like modem
+ *     power)
+ *   - Refcount some clocks (see darwin)
+ *   - Split split split...
+ *
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/sections.h>
+#include <asm/errno.h>
+#include <asm/ohare.h>
+#include <asm/heathrow.h>
+#include <asm/keylargo.h>
+#include <asm/uninorth.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/dbdma.h>
+#include <asm/pci-bridge.h>
+#include <asm/pmac_low_i2c.h>
+
+#undef DEBUG_FEATURE
+
+#ifdef DEBUG_FEATURE
+#define DBG(fmt...) printk(KERN_DEBUG fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#ifdef CONFIG_6xx
+extern int powersave_lowspeed;
+#endif
+
+extern int powersave_nap;
+extern struct device_node *k2_skiplist[2];
+
+
+/*
+ * We use a single global lock to protect accesses. Each driver has
+ * to take care of its own locking
+ */
+static DEFINE_SPINLOCK(feature_lock);
+
+#define LOCK(flags)    spin_lock_irqsave(&feature_lock, flags);
+#define UNLOCK(flags)  spin_unlock_irqrestore(&feature_lock, flags);
+
+
+/*
+ * Instance of some macio stuffs
+ */
+struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+/* Walk up from 'child' through its device-tree parents looking for a
+ * registered macio chip of the given 'type' (type == 0 matches any).
+ * Returns the macio_chips[] entry or NULL if none matches.
+ */
+struct macio_chip *macio_find(struct device_node *child, int type)
+{
+       while(child) {
+               int     i;
+
+               for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
+                       if (child == macio_chips[i].of_node &&
+                           (!type || macio_chips[i].type == type))
+                               return &macio_chips[i];
+               child = child->parent;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(macio_find);
+
+/* Human-readable chip names, indexed by macio chip type (presumably the
+ * macio_* type enum used by macio_chips[].type — verify the ordering
+ * against its declaration).
+ */
+static const char *macio_names[] =
+{
+       "Unknown",
+       "Grand Central",
+       "OHare",
+       "OHareII",
+       "Heathrow",
+       "Gatwick",
+       "Paddington",
+       "Keylargo",
+       "Pangea",
+       "Intrepid",
+       "K2"
+};
+
+
+
+/*
+ * Uninorth reg. access. Note that Uni-N regs are big endian
+ */
+
+#define UN_REG(r)      (uninorth_base + ((r) >> 2))
+#define UN_IN(r)       (in_be32(UN_REG(r)))
+#define UN_OUT(r,v)    (out_be32(UN_REG(r), (v)))
+#define UN_BIS(r,v)    (UN_OUT((r), UN_IN(r) | (v)))
+#define UN_BIC(r,v)    (UN_OUT((r), UN_IN(r) & ~(v)))
+
+static struct device_node *uninorth_node;
+static u32 __iomem *uninorth_base;
+static u32 uninorth_rev;
+static int uninorth_u3;
+static void __iomem *u3_ht;
+
+/*
+ * For each motherboard family, we have a table of functions pointers
+ * that handle the various features.
+ */
+
+typedef long (*feature_call)(struct device_node *node, long param, long value);
+
+struct feature_table_entry {
+       unsigned int    selector;
+       feature_call    function;
+};
+
+struct pmac_mb_def
+{
+       const char*                     model_string;
+       const char*                     model_name;
+       int                             model_id;
+       struct feature_table_entry*     features;
+       unsigned long                   board_flags;
+};
+static struct pmac_mb_def pmac_mb;
+
+/*
+ * Here are the chip specific feature functions
+ */
+
+/* Set (value != 0) or clear (value == 0) the bits in 'mask' of macio
+ * register 'reg' on the macio chip of 'type' above 'node', under the
+ * global feature lock.  The discarded read-back pushes the write to the
+ * hardware.  Returns 0, or -ENODEV when no matching macio chip exists.
+ */
+static inline int simple_feature_tweak(struct device_node *node, int type,
+                                      int reg, u32 mask, int value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+
+       macio = macio_find(node, type);
+       if (!macio)
+               return -ENODEV;
+       LOCK(flags);
+       if (value)
+               MACIO_BIS(reg, mask);
+       else
+               MACIO_BIC(reg, mask);
+       (void)MACIO_IN32(reg);
+       UNLOCK(flags);
+
+       return 0;
+}
+
+#ifndef CONFIG_POWER4
+
+/* Enable or disable an SCC serial channel ("ch-a"/"ch-b") on OHare and
+ * Heathrow-class macios.
+ *
+ * On enable: powers up the SCC cell if it is off (pulsing its reset bit
+ * and waiting 15ms), then routes the channel's I/O; IrDA ports also turn
+ * the IR LED on via the PMU.  Passing PMAC_SCC_FLAG_XMON in 'param'
+ * locks the cell so later disables fail with -EPERM.  On disable:
+ * unroutes the channel and powers the cell down once both channels are
+ * off.  Returns 0, -ENODEV for unknown nodes, or -EPERM when locked.
+ */
+static long ohare_htw_scc_enable(struct device_node *node, long param,
+                                long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           chan_mask;
+       unsigned long           fcr;
+       unsigned long           flags;
+       int                     htw, trans;
+       unsigned long           rmask;
+
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+       if (!strcmp(node->name, "ch-a"))
+               chan_mask = MACIO_FLAG_SCCA_ON;
+       else if (!strcmp(node->name, "ch-b"))
+               chan_mask = MACIO_FLAG_SCCB_ON;
+       else
+               return -ENODEV;
+
+       htw = (macio->type == macio_heathrow || macio->type == macio_paddington
+               || macio->type == macio_gatwick);
+       /* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
+       trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
+                pmac_mb.model_id != PMAC_TYPE_YIKES);
+       if (value) {
+#ifdef CONFIG_ADB_PMU
+               if ((param & 0xfff) == PMAC_SCC_IRDA)
+                       pmu_enable_irled(1);
+#endif /* CONFIG_ADB_PMU */
+               LOCK(flags);
+               fcr = MACIO_IN32(OHARE_FCR);
+               /* Check if scc cell need enabling */
+               if (!(fcr & OH_SCC_ENABLE)) {
+                       fcr |= OH_SCC_ENABLE;
+                       if (htw) {
+                               /* Side effect: this will also power up the
+                                * modem, but it's too messy to figure out on which
+                                * ports this controls the transceiver and on which
+                                * it controls the modem
+                                */
+                               if (trans)
+                                       fcr &= ~HRW_SCC_TRANS_EN_N;
+                               MACIO_OUT32(OHARE_FCR, fcr);
+                               fcr |= (rmask = HRW_RESET_SCC);
+                               MACIO_OUT32(OHARE_FCR, fcr);
+                       } else {
+                               fcr |= (rmask = OH_SCC_RESET);
+                               MACIO_OUT32(OHARE_FCR, fcr);
+                       }
+                       UNLOCK(flags);
+                       (void)MACIO_IN32(OHARE_FCR);
+                       mdelay(15);
+                       LOCK(flags);
+                       fcr &= ~rmask;
+                       MACIO_OUT32(OHARE_FCR, fcr);
+               }
+               if (chan_mask & MACIO_FLAG_SCCA_ON)
+                       fcr |= OH_SCCA_IO;
+               if (chan_mask & MACIO_FLAG_SCCB_ON)
+                       fcr |= OH_SCCB_IO;
+               MACIO_OUT32(OHARE_FCR, fcr);
+               macio->flags |= chan_mask;
+               UNLOCK(flags);
+               if (param & PMAC_SCC_FLAG_XMON)
+                       macio->flags |= MACIO_FLAG_SCC_LOCKED;
+       } else {
+               if (macio->flags & MACIO_FLAG_SCC_LOCKED)
+                       return -EPERM;
+               LOCK(flags);
+               fcr = MACIO_IN32(OHARE_FCR);
+               if (chan_mask & MACIO_FLAG_SCCA_ON)
+                       fcr &= ~OH_SCCA_IO;
+               if (chan_mask & MACIO_FLAG_SCCB_ON)
+                       fcr &= ~OH_SCCB_IO;
+               MACIO_OUT32(OHARE_FCR, fcr);
+               if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
+                       fcr &= ~OH_SCC_ENABLE;
+                       if (htw && trans)
+                               fcr |= HRW_SCC_TRANS_EN_N;
+                       MACIO_OUT32(OHARE_FCR, fcr);
+               }
+               macio->flags &= ~(chan_mask);
+               UNLOCK(flags);
+               mdelay(10);
+#ifdef CONFIG_ADB_PMU
+               if ((param & 0xfff) == PMAC_SCC_IRDA)
+                       pmu_enable_irled(0);
+#endif /* CONFIG_ADB_PMU */
+       }
+       return 0;
+}
+
+/* Toggle the OHare floppy cell on/off through the feature control register. */
+static long ohare_floppy_enable(struct device_node *node, long param,
+                               long value)
+{
+       long rc;
+
+       rc = simple_feature_tweak(node, macio_ohare, OHARE_FCR,
+                                 OH_FLOPPY_ENABLE, value);
+       return rc;
+}
+
+/* Toggle the OHare MESH SCSI cell on/off through the feature control register. */
+static long ohare_mesh_enable(struct device_node *node, long param, long value)
+{
+       long rc;
+
+       rc = simple_feature_tweak(node, macio_ohare, OHARE_FCR,
+                                 OH_MESH_ENABLE, value);
+       return rc;
+}
+
+/* Enable/disable an OHare IDE cell. Bus 0 is the internal interface
+ * (which also needs the IO bus enabled first), bus 1 is the media bay.
+ * Any other bus number yields -ENODEV.
+ */
+static long ohare_ide_enable(struct device_node *node, long param, long value)
+{
+       if (param == 0) {
+               /* For some reason, setting the bit in set_initial_features()
+                * doesn't stick. I'm still investigating... --BenH.
+                */
+               if (value)
+                       simple_feature_tweak(node, macio_ohare,
+                               OHARE_FCR, OH_IOBUS_ENABLE, 1);
+               return simple_feature_tweak(node, macio_ohare,
+                       OHARE_FCR, OH_IDE0_ENABLE, value);
+       }
+       if (param == 1)
+               return simple_feature_tweak(node, macio_ohare,
+                       OHARE_FCR, OH_BAY_IDE_ENABLE, value);
+       return -ENODEV;
+}
+
+/* Assert (value != 0) or release the reset line of an OHare IDE bus.
+ * The FCR bits are active low, hence the !value. Unknown buses -> -ENODEV.
+ */
+static long ohare_ide_reset(struct device_node *node, long param, long value)
+{
+       u32 mask;
+
+       if (param == 0)
+               mask = OH_IDE0_RESET_N;
+       else if (param == 1)
+               mask = OH_IDE1_RESET_N;
+       else
+               return -ENODEV;
+       return simple_feature_tweak(node, macio_ohare, OHARE_FCR,
+                                   mask, !value);
+}
+
+/* Sleep (value == 1) or wake (value == 0) the OHare IO bus by clearing or
+ * setting OH_IOBUS_ENABLE. Refused unless the board is flagged as
+ * sleep-capable; other values of 'value' are silently ignored.
+ */
+static long ohare_sleep_state(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+
+       if (!(pmac_mb.board_flags & PMAC_MB_CAN_SLEEP))
+               return -EPERM;
+       switch (value) {
+       case 1:
+               MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
+               break;
+       case 0:
+               MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+               break;
+       }
+
+       return 0;
+}
+
+/* Power the Heathrow modem up (value != 0) or down (value == 0).
+ * Control is via bit 0 of the HRW_GPIO_MODEM_RESET GPIO and, on models
+ * other than Yosemite/Yikes, the HRW_SCC_TRANS_EN_N FCR bit. Every
+ * register change is followed by a 250ms settle delay performed with the
+ * feature lock dropped.
+ */
+static long heathrow_modem_enable(struct device_node *node, long param,
+                                 long value)
+{
+       struct macio_chip*      macio;
+       u8                      gpio;
+       unsigned long           flags;
+
+       macio = macio_find(node, macio_unknown);
+       if (!macio)
+               return -ENODEV;
+       /* Current GPIO state with the reset bit (bit 0) cleared */
+       gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
+       if (!value) {
+               LOCK(flags);
+               MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
+               UNLOCK(flags);
+               (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+               mdelay(250);
+       }
+       /* Yosemite and Yikes are excluded from the FCR tweak */
+       if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
+           pmac_mb.model_id != PMAC_TYPE_YIKES) {
+               LOCK(flags);
+               if (value)
+                       MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+               else
+                       MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+               UNLOCK(flags);
+               (void)MACIO_IN32(HEATHROW_FCR);
+               mdelay(250);
+       }
+       if (value) {
+               /* Pulse the reset line 1 -> 0 -> 1, letting things settle
+                * 250ms after each transition (lock released while waiting).
+                */
+               LOCK(flags);
+               MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
+               (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250); LOCK(flags);
+               MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
+               (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250); LOCK(flags);
+               MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
+               (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250);
+       }
+       return 0;
+}
+
+/* Gate the Heathrow floppy: the SWIM cell and the bay floppy enable
+ * bits are toggled together.
+ */
+static long heathrow_floppy_enable(struct device_node *node, long param,
+                                  long value)
+{
+       return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR,
+                                   HRW_SWIM_ENABLE | HRW_BAY_FLOPPY_ENABLE,
+                                   value);
+}
+
+/* Enable (value != 0) or disable the Heathrow MESH SCSI cell, together
+ * with its termination power bit in the MBCR. The whole sequence runs
+ * under the feature lock with 10us settle delays after each write.
+ */
+static long heathrow_mesh_enable(struct device_node *node, long param,
+                                long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+
+       macio = macio_find(node, macio_unknown);
+       if (!macio)
+               return -ENODEV;
+       LOCK(flags);
+       /* Set clear mesh cell enable */
+       if (value)
+               MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
+       else
+               MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
+       (void)MACIO_IN32(HEATHROW_FCR);
+       udelay(10);
+       /* Set/Clear termination power (bit cleared on enable, set on
+        * disable — presumably active low)
+        */
+       if (value)
+               MACIO_BIC(HEATHROW_MBCR, 0x04000000);
+       else
+               MACIO_BIS(HEATHROW_MBCR, 0x04000000);
+       (void)MACIO_IN32(HEATHROW_MBCR);
+       udelay(10);
+       UNLOCK(flags);
+
+       return 0;
+}
+
+/* Enable/disable a Heathrow IDE cell: bus 0 is internal, bus 1 the
+ * media bay. Unknown bus numbers yield -ENODEV.
+ */
+static long heathrow_ide_enable(struct device_node *node, long param,
+                               long value)
+{
+       u32 mask;
+
+       if (param == 0)
+               mask = HRW_IDE0_ENABLE;
+       else if (param == 1)
+               mask = HRW_BAY_IDE_ENABLE;
+       else
+               return -ENODEV;
+       return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR,
+                                   mask, value);
+}
+
+/* Assert (value != 0) or release a Heathrow IDE reset line. The FCR
+ * bits are active low, hence the !value.
+ */
+static long heathrow_ide_reset(struct device_node *node, long param,
+                              long value)
+{
+       u32 mask;
+
+       if (param == 0)
+               mask = HRW_IDE0_RESET_N;
+       else if (param == 1)
+               mask = HRW_IDE1_RESET_N;
+       else
+               return -ENODEV;
+       return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR,
+                                   mask, !value);
+}
+
+/* Power up (value != 0) or down the Heathrow BMAC ethernet cell. On
+ * power-up the cell is enabled and pulsed through reset with 10ms
+ * settle delays; on power-down only the IO enable bit is cleared.
+ */
+static long heathrow_bmac_enable(struct device_node *node, long param,
+                                long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+       if (value) {
+               LOCK(flags);
+               MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
+               MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
+               UNLOCK(flags);
+               (void)MACIO_IN32(HEATHROW_FCR);
+               mdelay(10);
+               LOCK(flags);
+               MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
+               UNLOCK(flags);
+               (void)MACIO_IN32(HEATHROW_FCR);
+               mdelay(10);
+       } else {
+               LOCK(flags);
+               MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
+               UNLOCK(flags);
+       }
+       return 0;
+}
+
+/* Power the Heathrow sound chip up (value != 0) or down. The power bit
+ * is active low (HRW_SOUND_POWER_N); the clock bit is active high.
+ */
+static long heathrow_sound_enable(struct device_node *node, long param,
+                                 long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+
+       /* B&W G3 and Yikes don't support that properly (the
+        * sound appear to never come back after being shut down).
+        */
+       if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
+           pmac_mb.model_id == PMAC_TYPE_YIKES)
+               return 0;
+
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+       if (value) {
+               /* Clock on first, then release power-down */
+               LOCK(flags);
+               MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+               MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
+               UNLOCK(flags);
+               (void)MACIO_IN32(HEATHROW_FCR);
+       } else {
+               /* Power down first, then stop the clock */
+               LOCK(flags);
+               MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
+               MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+               UNLOCK(flags);
+       }
+       return 0;
+}
+
+/* Chip state saved by the sleep code and restored on wakeup */
+static u32 save_fcr[6];
+static u32 save_mbcr;
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
+static struct dbdma_regs save_dbdma[13];
+static struct dbdma_regs save_alt_dbdma[13];
+
+/* Save the config registers of the 13 DBDMA channels of a macio chip
+ * into 'save'. Channel i lives at byte offset 0x8000 + i*0x100; the >>2
+ * converts that to the units of macio->base (assumed a 32-bit register
+ * pointer — TODO confirm against the macio_chip definition).
+ */
+static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
+{
+       int i;
+
+       /* Save state & config of DBDMA channels */
+       for (i = 0; i < 13; i++) {
+               volatile struct dbdma_regs __iomem * chan = (void __iomem *)
+                       (macio->base + ((0x8000+i*0x100)>>2));
+               save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
+               save[i].cmdptr = in_le32(&chan->cmdptr);
+               save[i].intr_sel = in_le32(&chan->intr_sel);
+               save[i].br_sel = in_le32(&chan->br_sel);
+               save[i].wait_sel = in_le32(&chan->wait_sel);
+       }
+}
+
+/* Counterpart of dbdma_save(): stop each of the 13 DBDMA channels, wait
+ * for it to go idle, then write back the registers captured at sleep.
+ */
+static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
+{
+       int i;
+
+       /* Restore state & config of DBDMA channels */
+       for (i = 0; i < 13; i++) {
+               volatile struct dbdma_regs __iomem * chan = (void __iomem *)
+                       (macio->base + ((0x8000+i*0x100)>>2));
+               /* Clear all control bits, then spin until the channel
+                * reports inactive. NOTE(review): no timeout here — a
+                * stuck channel would hang the wakeup path.
+                */
+               out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
+               while (in_le32(&chan->status) & ACTIVE)
+                       mb();
+               out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
+               out_le32(&chan->cmdptr, save[i].cmdptr);
+               out_le32(&chan->intr_sel, save[i].intr_sel);
+               out_le32(&chan->br_sel, save[i].br_sel);
+               out_le32(&chan->wait_sel, save[i].wait_sel);
+       }
+}
+
+/* Prepare a Heathrow for sleep. 'secondary' selects the gatwick (second
+ * chip) save slots. FCR state is read from offsets 0x38/0x3c (and 0x34
+ * for the MBCR on the primary), DBDMA state is stashed via dbdma_save(),
+ * then the various cells are forced down.
+ */
+static void heathrow_sleep(struct macio_chip *macio, int secondary)
+{
+       if (secondary) {
+               dbdma_save(macio, save_alt_dbdma);
+               save_fcr[2] = MACIO_IN32(0x38);
+               save_fcr[3] = MACIO_IN32(0x3c);
+       } else {
+               dbdma_save(macio, save_dbdma);
+               save_fcr[0] = MACIO_IN32(0x38);
+               save_fcr[1] = MACIO_IN32(0x3c);
+               save_mbcr = MACIO_IN32(0x34);
+               /* Make sure sound is shut down */
+               MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
+               MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+               /* This seems to be necessary as well or the fan
+                * keeps coming up and battery drains fast */
+               MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
+               MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
+               /* Make sure eth is down even if module or sleep
+                * won't work properly */
+               MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
+       }
+       /* Make sure modem is shut down */
+       MACIO_OUT8(HRW_GPIO_MODEM_RESET,
+               MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
+       MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+       MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);
+
+       /* Let things settle */
+       (void)MACIO_IN32(HEATHROW_FCR);
+}
+
+/* Counterpart of heathrow_sleep(): write back the saved FCR values (and
+ * MBCR on the primary chip) with short settle delays between registers,
+ * then restore the DBDMA channel state. The primary always comes back
+ * with HRW_IOBUS_ENABLE forced on.
+ */
+static void heathrow_wakeup(struct macio_chip *macio, int secondary)
+{
+       if (secondary) {
+               MACIO_OUT32(0x38, save_fcr[2]);
+               (void)MACIO_IN32(0x38);
+               mdelay(1);
+               MACIO_OUT32(0x3c, save_fcr[3]);
+               (void)MACIO_IN32(0x38);
+               mdelay(10);
+               dbdma_restore(macio, save_alt_dbdma);
+       } else {
+               MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
+               (void)MACIO_IN32(0x38);
+               mdelay(1);
+               MACIO_OUT32(0x3c, save_fcr[1]);
+               (void)MACIO_IN32(0x38);
+               mdelay(1);
+               MACIO_OUT32(0x34, save_mbcr);
+               (void)MACIO_IN32(0x38);
+               mdelay(10);
+               dbdma_restore(macio, save_dbdma);
+       }
+}
+
+/* Sleep (value == 1) or wake (value == 0) the Heathrow chips. When a
+ * gatwick secondary is present it is put to sleep before the primary
+ * and woken after it. Refused if the board can't sleep.
+ */
+static long heathrow_sleep_state(struct device_node *node, long param,
+                                long value)
+{
+       if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+               return -EPERM;
+       if (value == 1) {
+               if (macio_chips[1].type == macio_gatwick)
+                       heathrow_sleep(&macio_chips[0], 1);
+               heathrow_sleep(&macio_chips[0], 0);
+       } else if (value == 0) {
+               heathrow_wakeup(&macio_chips[0], 0);
+               if (macio_chips[1].type == macio_gatwick)
+                       heathrow_wakeup(&macio_chips[0], 1);
+       }
+       return 0;
+}
+
+/* Enable (value != 0) or disable a Core99/KeyLargo SCC channel; the
+ * channel is identified by the node name ("ch-a"/"ch-b"). Handles the
+ * shared SCC cell enable/reset, the per-channel line drivers, and the
+ * IrDA mode of channel B. PMAC_SCC_FLAG_XMON in 'param' locks the SCC
+ * so later disable requests are refused with -EPERM.
+ */
+static long core99_scc_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+       unsigned long           chan_mask;
+       u32                     fcr;
+
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+       if (!strcmp(node->name, "ch-a"))
+               chan_mask = MACIO_FLAG_SCCA_ON;
+       else if (!strcmp(node->name, "ch-b"))
+               chan_mask = MACIO_FLAG_SCCB_ON;
+       else
+               return -ENODEV;
+
+       if (value) {
+               int need_reset_scc = 0;
+               int need_reset_irda = 0;
+
+               LOCK(flags);
+               fcr = MACIO_IN32(KEYLARGO_FCR0);
+               /* Check if scc cell need enabling */
+               if (!(fcr & KL0_SCC_CELL_ENABLE)) {
+                       fcr |= KL0_SCC_CELL_ENABLE;
+                       need_reset_scc = 1;
+               }
+               if (chan_mask & MACIO_FLAG_SCCA_ON) {
+                       fcr |= KL0_SCCA_ENABLE;
+                       /* Don't enable line drivers for I2S modem */
+                       if ((param & 0xfff) == PMAC_SCC_I2S1)
+                               fcr &= ~KL0_SCC_A_INTF_ENABLE;
+                       else
+                               fcr |= KL0_SCC_A_INTF_ENABLE;
+               }
+               if (chan_mask & MACIO_FLAG_SCCB_ON) {
+                       fcr |= KL0_SCCB_ENABLE;
+                       /* Perform irda specific inits */
+                       if ((param & 0xfff) == PMAC_SCC_IRDA) {
+                               fcr &= ~KL0_SCC_B_INTF_ENABLE;
+                               fcr |= KL0_IRDA_ENABLE;
+                               fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
+                               fcr |= KL0_IRDA_SOURCE1_SEL;
+                               fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
+                               fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
+                               need_reset_irda = 1;
+                       } else
+                               fcr |= KL0_SCC_B_INTF_ENABLE;
+               }
+               MACIO_OUT32(KEYLARGO_FCR0, fcr);
+               macio->flags |= chan_mask;
+               /* Newly enabled cells get a 15ms reset pulse; the lock is
+                * dropped around the delays.
+                */
+               if (need_reset_scc)  {
+                       MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+                       UNLOCK(flags);
+                       mdelay(15);
+                       LOCK(flags);
+                       MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
+               }
+               if (need_reset_irda)  {
+                       MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+                       UNLOCK(flags);
+                       mdelay(15);
+                       LOCK(flags);
+                       MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
+               }
+               UNLOCK(flags);
+               if (param & PMAC_SCC_FLAG_XMON)
+                       macio->flags |= MACIO_FLAG_SCC_LOCKED;
+       } else {
+               if (macio->flags & MACIO_FLAG_SCC_LOCKED)
+                       return -EPERM;
+               LOCK(flags);
+               fcr = MACIO_IN32(KEYLARGO_FCR0);
+               if (chan_mask & MACIO_FLAG_SCCA_ON)
+                       fcr &= ~KL0_SCCA_ENABLE;
+               if (chan_mask & MACIO_FLAG_SCCB_ON) {
+                       fcr &= ~KL0_SCCB_ENABLE;
+                       /* Perform irda specific clears */
+                       if ((param & 0xfff) == PMAC_SCC_IRDA) {
+                               fcr &= ~KL0_IRDA_ENABLE;
+                               fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
+                               fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
+                               fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
+                       }
+               }
+               MACIO_OUT32(KEYLARGO_FCR0, fcr);
+               /* Shut the whole cell down once both channels are off */
+               if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
+                       fcr &= ~KL0_SCC_CELL_ENABLE;
+                       MACIO_OUT32(KEYLARGO_FCR0, fcr);
+               }
+               macio->flags &= ~(chan_mask);
+               UNLOCK(flags);
+               mdelay(10);
+       }
+       return 0;
+}
+
+/* Power the KeyLargo modem up (value != 0) or down. A NULL node is a
+ * hack for the internal USB modem and is mapped to the primary macio
+ * node. Control is the modem reset GPIO plus the KL2_ALT_DATA_OUT FCR2
+ * bit; each step is followed by a 250ms settle with the lock dropped.
+ */
+static long
+core99_modem_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip*      macio;
+       u8                      gpio;
+       unsigned long           flags;
+
+       /* Hack for internal USB modem */
+       if (node == NULL) {
+               if (macio_chips[0].type != macio_keylargo)
+                       return -ENODEV;
+               node = macio_chips[0].of_node;
+       }
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+       /* GPIO as output, data bit cleared */
+       gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
+       gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
+       gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
+
+       if (!value) {
+               LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+               UNLOCK(flags);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               mdelay(250);
+       }
+       LOCK(flags);
+       if (value) {
+               MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+               UNLOCK(flags);
+               (void)MACIO_IN32(KEYLARGO_FCR2);
+               mdelay(250);
+       } else {
+               MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+               UNLOCK(flags);
+       }
+       if (value) {
+               /* Pulse the reset GPIO 1 -> 0 -> 1, 250ms between steps */
+               LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250); LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250); LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250);
+       }
+       return 0;
+}
+
+/* Pangea/Intrepid variant of core99_modem_enable(): same reset-GPIO
+ * pulse sequence, but power is switched through the KL_GPIO_MODEM_POWER
+ * GPIO instead of the FCR2 alt-data-out bit.
+ */
+static long
+pangea_modem_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip*      macio;
+       u8                      gpio;
+       unsigned long           flags;
+
+       /* Hack for internal USB modem */
+       if (node == NULL) {
+               if (macio_chips[0].type != macio_pangea &&
+                   macio_chips[0].type != macio_intrepid)
+                       return -ENODEV;
+               node = macio_chips[0].of_node;
+       }
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+       /* GPIO as output, data bit cleared */
+       gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
+       gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
+       gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
+
+       if (!value) {
+               LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+               UNLOCK(flags);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               mdelay(250);
+       }
+       LOCK(flags);
+       if (value) {
+               MACIO_OUT8(KL_GPIO_MODEM_POWER,
+                       KEYLARGO_GPIO_OUTPUT_ENABLE);
+               UNLOCK(flags);
+               (void)MACIO_IN32(KEYLARGO_FCR2);
+               mdelay(250);
+       } else {
+               MACIO_OUT8(KL_GPIO_MODEM_POWER,
+                       KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
+               UNLOCK(flags);
+       }
+       if (value) {
+               /* Pulse the reset GPIO 1 -> 0 -> 1, 250ms between steps */
+               LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250); LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250); LOCK(flags);
+               MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+               (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+               UNLOCK(flags); mdelay(250);
+       }
+       return 0;
+}
+
+/* Gate the U2 ATA-100 cell clock in the UniNorth clock control register.
+ * Requires uninorth rev >= 0x24. When enabling, the matching PCI device
+ * (looked up from the OF node) is re-enabled and made bus master.
+ */
+static long
+core99_ata100_enable(struct device_node *node, long value)
+{
+       unsigned long flags;
+       struct pci_dev *pdev = NULL;
+       u8 pbus, pid;
+
+       if (uninorth_rev < 0x24)
+               return -ENODEV;
+
+       LOCK(flags);
+       if (value)
+               UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
+       else
+               UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
+       (void)UN_IN(UNI_N_CLOCK_CNTL);
+       UNLOCK(flags);
+       udelay(20);
+
+       if (value) {
+               /* Silently succeed if the PCI device can't be found */
+               if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
+                       pdev = pci_find_slot(pbus, pid);
+               if (pdev == NULL)
+                       return 0;
+               pci_enable_device(pdev);
+               pci_set_master(pdev);
+       }
+       return 0;
+}
+
+/* Enable/disable an IDE interface: buses 0 to 2 are KeyLargo-based and
+ * gated through FCR1, bus 3 is the U2-based ATA-100 cell handled by
+ * core99_ata100_enable().
+ */
+static long
+core99_ide_enable(struct device_node *node, long param, long value)
+{
+       if (param == 0)
+               return simple_feature_tweak(node, macio_unknown,
+                       KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
+       if (param == 1)
+               return simple_feature_tweak(node, macio_unknown,
+                       KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
+       if (param == 2)
+               return simple_feature_tweak(node, macio_unknown,
+                       KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
+       if (param == 3)
+               return core99_ata100_enable(node, value);
+       return -ENODEV;
+}
+
+/* Assert (value != 0) or release a KeyLargo IDE reset line. The FCR1
+ * bits are active low, hence the !value. Buses 0-2 only.
+ */
+static long
+core99_ide_reset(struct device_node *node, long param, long value)
+{
+       static const u32 reset_bits[] = {
+               KL1_EIDE0_RESET_N,
+               KL1_EIDE1_RESET_N,
+               KL1_UIDE_RESET_N,
+       };
+
+       if (param < 0 || param > 2)
+               return -ENODEV;
+       return simple_feature_tweak(node, macio_unknown, KEYLARGO_FCR1,
+                                   reset_bits[param], !value);
+}
+
+/* Gate the GMAC ethernet cell clock in the UniNorth clock control
+ * register, with a short settle delay after the write.
+ */
+static long
+core99_gmac_enable(struct device_node *node, long param, long value)
+{
+       unsigned long flags;
+
+       LOCK(flags);
+       if (value)
+               UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
+       else
+               UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
+       (void)UN_IN(UNI_N_CLOCK_CNTL);
+       UNLOCK(flags);
+       udelay(20);
+
+       return 0;
+}
+
+/* Pulse the GMAC PHY reset GPIO: drive it (output enable) for 10ms,
+ * then release by writing the data bit. Only valid on KeyLargo, Pangea
+ * and Intrepid chips. 'param' and 'value' are unused.
+ */
+static long
+core99_gmac_phy_reset(struct device_node *node, long param, long value)
+{
+       unsigned long flags;
+       struct macio_chip *macio;
+
+       macio = &macio_chips[0];
+       if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+           macio->type != macio_intrepid)
+               return -ENODEV;
+
+       LOCK(flags);
+       MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
+       (void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
+       UNLOCK(flags);
+       mdelay(10);
+       LOCK(flags);
+       MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
+               KEYLARGO_GPIO_OUTOUT_DATA);
+       UNLOCK(flags);
+       mdelay(10);
+
+       return 0;
+}
+
+/* Power the sound chip up (value != 0) or down through its power GPIO.
+ * Only done on Pismo and Titanium; other models are a silent no-op.
+ */
+static long
+core99_sound_chip_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+
+       /* Do a better probe code, screamer G4 desktops &
+        * iMacs can do that too, add a recalibrate  in
+        * the driver as well
+        */
+       if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
+           pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
+               LOCK(flags);
+               if (value)
+                       MACIO_OUT8(KL_GPIO_SOUND_POWER,
+                               KEYLARGO_GPIO_OUTPUT_ENABLE |
+                               KEYLARGO_GPIO_OUTOUT_DATA);
+               else
+                       MACIO_OUT8(KL_GPIO_SOUND_POWER,
+                               KEYLARGO_GPIO_OUTPUT_ENABLE);
+               (void)MACIO_IN8(KL_GPIO_SOUND_POWER);
+               UNLOCK(flags);
+       }
+       return 0;
+}
+
+/* Power the Airport card slot up (value != 0) or down. The power-up
+ * path replays the Open Firmware enable-cardslot/init-wireless register
+ * sequence; the current state is tracked in MACIO_FLAG_AIRPORT_ON so
+ * redundant requests are no-ops. The node must be the macio node itself
+ * or a direct child of it.
+ */
+static long
+core99_airport_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip*      macio;
+       unsigned long           flags;
+       int                     state;
+
+       macio = macio_find(node, 0);
+       if (!macio)
+               return -ENODEV;
+
+       /* Hint: we allow passing of macio itself for the sake of the
+        * sleep code
+        */
+       if (node != macio->of_node &&
+           (!node->parent || node->parent != macio->of_node))
+               return -ENODEV;
+       state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
+       if (value == state)
+               return 0;
+       if (value) {
+               /* This code is a reproduction of OF enable-cardslot
+                * and init-wireless methods, slightly hacked until
+                * I got it working.
+                */
+               LOCK(flags);
+               MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
+               (void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
+               UNLOCK(flags);
+               mdelay(10);
+               LOCK(flags);
+               MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
+               (void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
+               UNLOCK(flags);
+
+               mdelay(10);
+
+               LOCK(flags);
+               MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
+               (void)MACIO_IN32(KEYLARGO_FCR2);
+               udelay(10);
+               MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
+               (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
+               udelay(10);
+               MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
+               (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
+               udelay(10);
+               MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
+               (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
+               udelay(10);
+               MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
+               (void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
+               udelay(10);
+               MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
+               (void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
+               UNLOCK(flags);
+               udelay(10);
+               /* Magic offsets taken from the OF methods — meaning of
+                * 0x1c000/0x1a3e0 not documented here.
+                */
+               MACIO_OUT32(0x1c000, 0);
+               mdelay(1);
+               MACIO_OUT8(0x1a3e0, 0x41);
+               (void)MACIO_IN8(0x1a3e0);
+               udelay(10);
+               LOCK(flags);
+               MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
+               (void)MACIO_IN32(KEYLARGO_FCR2);
+               UNLOCK(flags);
+               mdelay(100);
+
+               macio->flags |= MACIO_FLAG_AIRPORT_ON;
+       } else {
+               LOCK(flags);
+               MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
+               (void)MACIO_IN32(KEYLARGO_FCR2);
+               MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
+               MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
+               MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
+               MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
+               MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
+               (void)MACIO_IN8(KL_GPIO_AIRPORT_4);
+               UNLOCK(flags);
+
+               macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
+       }
+       return 0;
+}
+
+#ifdef CONFIG_SMP
+/* Pulse the soft-reset GPIO of CPU number 'param'. The GPIO address is
+ * taken from the CPU node's "soft-reset" property when present, falling
+ * back to a table of default KeyLargo reset lines. KeyLargo only.
+ */
+static long
+core99_reset_cpu(struct device_node *node, long param, long value)
+{
+       unsigned int reset_io = 0;
+       unsigned long flags;
+       struct macio_chip *macio;
+       struct device_node *np;
+       const int dflt_reset_lines[] = {        KL_GPIO_RESET_CPU0,
+                                               KL_GPIO_RESET_CPU1,
+                                               KL_GPIO_RESET_CPU2,
+                                               KL_GPIO_RESET_CPU3 };
+
+       macio = &macio_chips[0];
+       if (macio->type != macio_keylargo)
+               return -ENODEV;
+
+       np = find_path_device("/cpus");
+       if (np == NULL)
+               return -ENODEV;
+       for (np = np->child; np != NULL; np = np->sibling) {
+               u32 *num = (u32 *)get_property(np, "reg", NULL);
+               u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
+               if (num == NULL || rst == NULL)
+                       continue;
+               if (param == *num) {
+                       reset_io = *rst;
+                       break;
+               }
+       }
+       if (np == NULL || reset_io == 0) {
+               /* Fall back to the default line, rejecting out-of-range
+                * CPU numbers instead of indexing past the table.
+                */
+               if (param < 0 ||
+                   param >= (long)(sizeof(dflt_reset_lines) /
+                                   sizeof(dflt_reset_lines[0])))
+                       return -ENODEV;
+               reset_io = dflt_reset_lines[param];
+       }
+
+       /* Pulse the GPIO: output-enable, brief delay, then clear */
+       LOCK(flags);
+       MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
+       (void)MACIO_IN8(reset_io);
+       udelay(1);
+       MACIO_OUT8(reset_io, 0);
+       (void)MACIO_IN8(reset_io);
+       UNLOCK(flags);
+
+       return 0;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * PMAC_FTR_USB_ENABLE: power a Core99/Pangea/Intrepid USB cell up or
+ * down.  The cell is identified via the "AAPL,clock-id" property of
+ * 'node' ("usb0u048" / "usb1u148" / "usb2u248"); 'value' non-zero
+ * enables the cell, 0 suspends it.  Returns 0 or -ENODEV.
+ */
+static long
+core99_usb_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio;
+       unsigned long flags;
+       char *prop;
+       int number;
+       u32 reg;
+
+       macio = &macio_chips[0];
+       if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+           macio->type != macio_intrepid)
+               return -ENODEV;
+
+       prop = (char *)get_property(node, "AAPL,clock-id", NULL);
+       if (!prop)
+               return -ENODEV;
+       /* 'number' is the first of the two ports behind the cell: cells 0
+        * and 2 are controlled via FCR0/FCR4, cell 4 (USB2) via FCR1/FCR3.
+        */
+       if (strncmp(prop, "usb0u048", 8) == 0)
+               number = 0;
+       else if (strncmp(prop, "usb1u148", 8) == 0)
+               number = 2;
+       else if (strncmp(prop, "usb2u248", 8) == 0)
+               number = 4;
+       else
+               return -ENODEV;
+
+       /* Sorry for the brute-force locking, but this is only used during
+        * sleep and the timing seem to be critical
+        */
+       LOCK(flags);
+       if (value) {
+               /* Turn ON */
+               if (number == 0) {
+                       MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+                       UNLOCK(flags);
+                       mdelay(1);
+                       LOCK(flags);
+                       MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
+               } else if (number == 2) {
+                       MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
+                       /* NOTE(review): in this and the number == 4 case the
+                        * flush read happens after UNLOCK, unlike the
+                        * number == 0 case above — confirm the ordering
+                        * difference is intentional.
+                        */
+                       UNLOCK(flags);
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+                       mdelay(1);
+                       LOCK(flags);
+                       MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
+               } else if (number == 4) {
+                       MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
+                       UNLOCK(flags);
+                       (void)MACIO_IN32(KEYLARGO_FCR1);
+                       mdelay(1);
+                       LOCK(flags);
+                       MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
+               }
+               /* Clear the wakeup/event bits for both ports of the cell */
+               if (number < 4) {
+                       reg = MACIO_IN32(KEYLARGO_FCR4);
+                       reg &=  ~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
+                               KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
+                       reg &=  ~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
+                               KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
+                       MACIO_OUT32(KEYLARGO_FCR4, reg);
+                       (void)MACIO_IN32(KEYLARGO_FCR4);
+                       udelay(10);
+               } else {
+                       reg = MACIO_IN32(KEYLARGO_FCR3);
+                       reg &=  ~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
+                               KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
+                       reg &=  ~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
+                               KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
+                       MACIO_OUT32(KEYLARGO_FCR3, reg);
+                       (void)MACIO_IN32(KEYLARGO_FCR3);
+                       udelay(10);
+               }
+               if (macio->type == macio_intrepid) {
+                       /* wait for clock stopped bits to clear */
+                       u32 test0 = 0, test1 = 0;
+                       u32 status0, status1;
+                       int timeout = 1000;
+
+                       UNLOCK(flags);
+                       switch (number) {
+                       case 0:
+                               test0 = UNI_N_CLOCK_STOPPED_USB0;
+                               test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
+                               break;
+                       case 2:
+                               test0 = UNI_N_CLOCK_STOPPED_USB1;
+                               test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
+                               break;
+                       case 4:
+                               test0 = UNI_N_CLOCK_STOPPED_USB2;
+                               test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
+                               break;
+                       }
+                       /* Poll up to ~1s (1000 x 1ms) for both clocks */
+                       do {
+                               if (--timeout <= 0) {
+                                       printk(KERN_ERR "core99_usb_enable: "
+                                              "Timeout waiting for clocks\n");
+                                       break;
+                               }
+                               mdelay(1);
+                               status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
+                               status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
+                       } while ((status0 & test0) | (status1 & test1));
+                       LOCK(flags);
+               }
+       } else {
+               /* Turn OFF */
+               /* Arm the wakeup/event bits so the suspended ports can
+                * still wake the machine.
+                */
+               if (number < 4) {
+                       reg = MACIO_IN32(KEYLARGO_FCR4);
+                       reg |=  KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
+                               KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
+                       reg |=  KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
+                               KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
+                       MACIO_OUT32(KEYLARGO_FCR4, reg);
+                       (void)MACIO_IN32(KEYLARGO_FCR4);
+                       udelay(1);
+               } else {
+                       reg = MACIO_IN32(KEYLARGO_FCR3);
+                       reg |=  KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
+                               KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
+                       reg |=  KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
+                               KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
+                       MACIO_OUT32(KEYLARGO_FCR3, reg);
+                       (void)MACIO_IN32(KEYLARGO_FCR3);
+                       udelay(1);
+               }
+               /* Disable the cell and suspend its pads (the cell itself is
+                * left running on Intrepid).
+                */
+               if (number == 0) {
+                       if (macio->type != macio_intrepid)
+                               MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+                       udelay(1);
+                       MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+               } else if (number == 2) {
+                       if (macio->type != macio_intrepid)
+                               MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+                       udelay(1);
+                       MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
+                       (void)MACIO_IN32(KEYLARGO_FCR0);
+               } else if (number == 4) {
+                       udelay(1);
+                       MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
+                       (void)MACIO_IN32(KEYLARGO_FCR1);
+               }
+               udelay(1);
+       }
+       UNLOCK(flags);
+
+       return 0;
+}
+
+/*
+ * PMAC_FTR_1394_ENABLE: gate the FireWire cell clock in UniNorth.
+ * 'value' non-zero enables the clock, 0 disables it.  Returns 0 or
+ * -ENODEV when the chipset has no supported FireWire cell.
+ */
+static long
+core99_firewire_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+       unsigned long flags;
+
+       if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+           macio->type != macio_intrepid)
+               return -ENODEV;
+       if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
+               return -ENODEV;
+
+       LOCK(flags);
+       if (value)
+               UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+       else
+               UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+       /* Read back to flush the posted write before releasing the lock */
+       (void)UN_IN(UNI_N_CLOCK_CNTL);
+       UNLOCK(flags);
+       mdelay(1);
+
+       return 0;
+}
+
+/*
+ * PMAC_FTR_1394_CABLE_POWER: switch the FireWire cable power GPIO.
+ * 'node' may be NULL (trick: we allow NULL node here).  Returns 0 or
+ * -ENODEV when the board has no controllable FW power.
+ */
+static long
+core99_firewire_cable_power(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+       unsigned long flags;
+
+       if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
+               return -ENODEV;
+       if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+           macio->type != macio_intrepid)
+               return -ENODEV;
+       if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
+               return -ENODEV;
+
+       /* GPIO value 0 turns cable power on, 4 turns it off */
+       LOCK(flags);
+       MACIO_OUT8(KL_GPIO_FW_CABLE_POWER, value ? 0 : 4);
+       MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
+       udelay(10);
+       UNLOCK(flags);
+       mdelay(1);
+
+       return 0;
+}
+
+/*
+ * Enable/disable the UniNorth AACK delay.  Only available on UniNorth
+ * revisions >= 0xd2; note the control is keyed off 'param', not 'value'.
+ */
+static long
+intrepid_aack_delay_enable(struct device_node *node, long param, long value)
+{
+       unsigned long flags;
+
+       if (uninorth_rev < 0xd2)
+               return -ENODEV;
+
+       LOCK(flags);
+       if (param != 0)
+               UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
+       else
+               UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
+       UNLOCK(flags);
+
+       return 0;
+}
+
+
+#endif /* CONFIG_POWER4 */
+
+/* PMAC_FTR_READ_GPIO: read one KeyLargo GPIO register; 'param' is the
+ * register offset.  'macio' looks unused, but the MACIO_IN8() macro
+ * expands to an access through it.
+ */
+static long
+core99_read_gpio(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+
+       return MACIO_IN8(param);
+}
+
+
+/* PMAC_FTR_WRITE_GPIO: write the low byte of 'value' to the GPIO
+ * register at offset 'param'.  'macio' is referenced implicitly by the
+ * MACIO_OUT8() macro.
+ */
+static long
+core99_write_gpio(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+       u8 data = value & 0xff;
+
+       MACIO_OUT8(param, data);
+       return 0;
+}
+
+#ifdef CONFIG_POWER4
+/*
+ * PMAC_FTR_GMAC_ENABLE on K2 (G5): gate the GMAC cell clock.  While the
+ * clock is off the node is parked in k2_skiplist[0] — presumably so the
+ * config-space accessors skip the dead device; confirm against the
+ * k2_skiplist users.  The mb() orders the skiplist update against the
+ * FCR write.
+ */
+static long g5_gmac_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+       unsigned long flags;
+
+       if (node == NULL)
+               return -ENODEV;
+
+       LOCK(flags);
+       if (value) {
+               MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
+               mb();
+               k2_skiplist[0] = NULL;
+       } else {
+               k2_skiplist[0] = node;
+               mb();
+               MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
+       }
+       UNLOCK(flags);
+       mdelay(1);
+
+       return 0;
+}
+
+/*
+ * PMAC_FTR_1394_ENABLE on K2 (G5): gate the FireWire cell clock.  Same
+ * pattern as g5_gmac_enable(), using k2_skiplist[1] for the FW node.
+ */
+static long g5_fw_enable(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+       unsigned long flags;
+
+       if (node == NULL)
+               return -ENODEV;
+
+       LOCK(flags);
+       if (value) {
+               MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
+               mb();
+               k2_skiplist[1] = NULL;
+       } else {
+               k2_skiplist[1] = node;
+               mb();
+               MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
+       }
+       UNLOCK(flags);
+       mdelay(1);
+
+       return 0;
+}
+
+/* Take the U3 MPIC out of reset and enable its outputs.  Only acts on
+ * the MPIC node that sits directly under the "u3" bridge; anything else
+ * is silently ignored (returns 0).
+ */
+static long g5_mpic_enable(struct device_node *node, long param, long value)
+{
+       unsigned long flags;
+
+       if (node->parent == NULL || strcmp(node->parent->name, "u3"))
+               return 0;
+
+       LOCK(flags);
+       UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
+       UNLOCK(flags);
+
+       return 0;
+}
+
+/* PMAC_FTR_GMAC_PHY_RESET on G5: pulse the Ethernet PHY reset GPIO, but
+ * only when the PHY is the BCM5221 ("B5221") of the iMac G5.  'macio'
+ * is referenced implicitly by the MACIO_OUT8() macro.
+ */
+static long g5_eth_phy_reset(struct device_node *node, long param, long value)
+{
+       struct macio_chip *macio = &macio_chips[0];
+       struct device_node *phy;
+       int need_reset;
+
+       /*
+        * We must not reset the combo PHYs, only the BCM5221 found in
+        * the iMac G5.
+        */
+       phy = of_get_next_child(node, NULL);
+       if (!phy)
+               return -ENODEV;
+       need_reset = device_is_compatible(phy, "B5221");
+       of_node_put(phy);
+       if (!need_reset)
+               return 0;
+
+       /* PHY reset is GPIO 29, not in device-tree unfortunately */
+       /* (KEYLARGO_GPIO_OUTOUT_DATA is the constant's historical spelling) */
+       MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
+                  KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
+       /* Thankfully, this is now always called at a time when we can
+        * schedule by sungem.
+        */
+       msleep(10);
+       MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
+
+       return 0;
+}
+
+/* Enable the I2S0 cell clocks on K2 (G5).  Disabling (value == 0) is
+ * intentionally a no-op for now.
+ */
+static long g5_i2s_enable(struct device_node *node, long param, long value)
+{
+       /* Very crude implementation for now */
+       struct macio_chip *macio = &macio_chips[0];
+       unsigned long flags;
+
+       if (value == 0)
+               return 0; /* don't disable yet */
+
+       /* Sequence: clocks in FCR3, then the cell in FCR1, then release
+        * its reset — with settle delays in between.
+        */
+       LOCK(flags);
+       MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
+                 KL3_I2S0_CLK18_ENABLE);
+       udelay(10);
+       MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
+                 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
+       udelay(10);
+       MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
+       UNLOCK(flags);
+       udelay(10);
+
+       return 0;
+}
+
+
+#ifdef CONFIG_SMP
+/* PMAC_FTR_RESET_CPU on K2 (G5): pulse the soft-reset GPIO of the CPU
+ * whose /cpus child node has "reg" == param.  The GPIO offset comes
+ * from that node's "soft-reset" property.
+ */
+static long g5_reset_cpu(struct device_node *node, long param, long value)
+{
+       unsigned int reset_io = 0;
+       unsigned long flags;
+       struct macio_chip *macio;
+       struct device_node *np;
+
+       macio = &macio_chips[0];
+       if (macio->type != macio_keylargo2)
+               return -ENODEV;
+
+       np = find_path_device("/cpus");
+       if (np == NULL)
+               return -ENODEV;
+       /* Linear scan of the cpu nodes for a matching "reg" */
+       for (np = np->child; np != NULL; np = np->sibling) {
+               u32 *num = (u32 *)get_property(np, "reg", NULL);
+               u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
+               if (num == NULL || rst == NULL)
+                       continue;
+               if (param == *num) {
+                       reset_io = *rst;
+                       break;
+               }
+       }
+       if (np == NULL || reset_io == 0)
+               return -ENODEV;
+
+       /* Drive the reset line, then release it; reads flush the writes */
+       LOCK(flags);
+       MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
+       (void)MACIO_IN8(reset_io);
+       udelay(1);
+       MACIO_OUT8(reset_io, 0);
+       (void)MACIO_IN8(reset_io);
+       UNLOCK(flags);
+
+       return 0;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * This can be called from pmac_smp so isn't static
+ *
+ * This takes the second CPU off the bus on dual CPU machines
+ * running UP
+ */
+void g5_phy_disable_cpu1(void)
+{
+       /* Zeroing the API PHY config register disables CPU1's bus PHY */
+       UN_OUT(U3_API_PHY_CONFIG_1, 0);
+}
+#endif /* CONFIG_POWER4 */
+
+#ifndef CONFIG_POWER4
+
+/* Power down as much of a KeyLargo as possible.  'sleep_mode' non-zero
+ * additionally suspends the USB reference clock and shuts down extra
+ * PLLs/clocks needed only while running.  Called from core99_sleep().
+ */
+static void
+keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+       u32 temp;
+
+       if (sleep_mode) {
+               mdelay(1);
+               MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
+               (void)MACIO_IN32(KEYLARGO_FCR0);
+               mdelay(1);
+       }
+
+       /* SCC, IrDA and their clocks off */
+       MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+                               KL0_SCC_CELL_ENABLE |
+                               KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
+                               KL0_IRDA_CLK19_ENABLE);
+
+       /* Media bay left in IDE mode */
+       MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
+       MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);
+
+       /* Audio, I2S and IDE cells off */
+       MACIO_BIC(KEYLARGO_FCR1,
+               KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
+               KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
+               KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+               KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+               KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+               KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
+               KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
+               KL1_UIDE_ENABLE);
+
+       MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+       MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);
+
+       /* Shut down PLLs and clocks in FCR3; rev >= 2 chips have more
+        * PLL controls, and sleep mode allows killing even the timer/VIA
+        * clocks.
+        */
+       temp = MACIO_IN32(KEYLARGO_FCR3);
+       if (macio->rev >= 2) {
+               temp |= KL3_SHUTDOWN_PLL2X;
+               if (sleep_mode)
+                       temp |= KL3_SHUTDOWN_PLL_TOTAL;
+       }
+
+       temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
+               KL3_SHUTDOWN_PLLKW35;
+       if (sleep_mode)
+               temp |= KL3_SHUTDOWN_PLLKW12;
+       temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
+               | KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
+       if (sleep_mode)
+               temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
+       MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+       /* Flush posted writes & wait a bit */
+       (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
+}
+
+/* Pangea variant of the shutdown sequence (see keylargo_shutdown()):
+ * also turns the USB cells off, has no media-bay/FCR2 IOBUS handling.
+ */
+static void
+pangea_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+       u32 temp;
+
+       MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+                               KL0_SCC_CELL_ENABLE |
+                               KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);
+
+       MACIO_BIC(KEYLARGO_FCR1,
+               KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
+               KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
+               KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+               KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+               KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+               KL1_UIDE_ENABLE);
+       /* On laptops, also hold the UIDE cell in reset */
+       if (pmac_mb.board_flags & PMAC_MB_MOBILE)
+               MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
+
+       MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+
+       temp = MACIO_IN32(KEYLARGO_FCR3);
+       temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
+               KL3_SHUTDOWN_PLLKW35;
+       temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
+               | KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
+       if (sleep_mode)
+               temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
+       MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+       /* Flush posted writes & wait a bit */
+       (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
+}
+
+/* Intrepid variant of the shutdown sequence (see keylargo_shutdown()):
+ * fewer cells to manage, and a longer settle delay at the end.
+ */
+static void
+intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+       u32 temp;
+
+       MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+                 KL0_SCC_CELL_ENABLE);
+
+       MACIO_BIC(KEYLARGO_FCR1,
+                 /*KL1_USB2_CELL_ENABLE |*/
+               KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+               KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+               KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
+       /* On laptops, also hold the UIDE cell in reset */
+       if (pmac_mb.board_flags & PMAC_MB_MOBILE)
+               MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
+
+       temp = MACIO_IN32(KEYLARGO_FCR3);
+       temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
+                 KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
+       if (sleep_mode)
+               temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
+       MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+       /* Flush posted writes & wait a bit */
+       (void)MACIO_IN32(KEYLARGO_FCR0);
+       mdelay(10);
+}
+
+
+void pmac_tweak_clock_spreading(int enable)
+{
+       struct macio_chip *macio = &macio_chips[0];
+
+       /* Hack for doing clock spreading on some machines PowerBooks and
+        * iBooks. This implements the "platform-do-clockspreading" OF
+        * property as decoded manually on various models. For safety, we also
+        * check the product ID in the device-tree in cases we'll whack the i2c
+        * chip to make reasonably sure we won't set wrong values in there
+        *
+        * Of course, ultimately, we have to implement a real parser for
+        * the platform-do-* stuff...
+        */
+
+       if (macio->type == macio_intrepid) {
+               if (enable)
+                       UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+               else
+                       UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+               mdelay(40);
+       }
+
+       /* The 'while' below is a run-once construct: every path ends in a
+        * 'break', the loop is only used to allow early exits.
+        */
+       while (machine_is_compatible("PowerBook5,2") ||
+              machine_is_compatible("PowerBook5,3") ||
+              machine_is_compatible("PowerBook6,2") ||
+              machine_is_compatible("PowerBook6,3")) {
+               struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
+               struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
+               u8 buffer[9];
+               u32 *productID;
+               int i, rc, changed = 0;
+
+               /* NOTE(review): 'dt' and the parents obtained via
+                * of_get_parent() below never get of_node_put() — the node
+                * refcounting looks unbalanced; verify against the OF API.
+                */
+               if (dt == NULL)
+                       break;
+               productID = (u32 *)get_property(dt, "pid#", NULL);
+               if (productID == NULL)
+                       break;
+               /* Find the i2c bus hanging off the "uni-n" bridge */
+               while(ui2c) {
+                       struct device_node *p = of_get_parent(ui2c);
+                       if (p && !strcmp(p->name, "uni-n"))
+                               break;
+                       ui2c = of_find_node_by_type(ui2c, "i2c");
+               }
+               if (ui2c == NULL)
+                       break;
+               DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
+               rc = pmac_low_i2c_open(ui2c, 1);
+               if (rc != 0)
+                       break;
+               /* Read the 9-byte clock-chip register block at 0x80 */
+               pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
+               rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
+               DBG("read result: %d,", rc);
+               if (rc != 0) {
+                       pmac_low_i2c_close(ui2c);
+                       break;
+               }
+               for (i=0; i<9; i++)
+                       DBG(" %02x", buffer[i]);
+               DBG("\n");
+
+               /* Per-model register values, decoded manually (see above) */
+               switch(*productID) {
+               case 0x1182:    /* AlBook 12" rev 2 */
+               case 0x1183:    /* iBook G4 12" */
+                       buffer[0] = (buffer[0] & 0x8f) | 0x70;
+                       buffer[2] = (buffer[2] & 0x7f) | 0x00;
+                       buffer[5] = (buffer[5] & 0x80) | 0x31;
+                       buffer[6] = (buffer[6] & 0x40) | 0xb0;
+                       buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
+                       buffer[8] = (buffer[8] & 0x00) | 0x30;
+                       changed = 1;
+                       break;
+               case 0x3142:    /* AlBook 15" (ATI M10) */
+               case 0x3143:    /* AlBook 17" (ATI M10) */
+                       buffer[0] = (buffer[0] & 0xaf) | 0x50;
+                       buffer[2] = (buffer[2] & 0x7f) | 0x00;
+                       buffer[5] = (buffer[5] & 0x80) | 0x31;
+                       buffer[6] = (buffer[6] & 0x40) | 0xb0;
+                       buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
+                       buffer[8] = (buffer[8] & 0x00) | 0x30;
+                       changed = 1;
+                       break;
+               default:
+                       DBG("i2c-hwclock: Machine model not handled\n");
+                       break;
+               }
+               if (!changed) {
+                       pmac_low_i2c_close(ui2c);
+                       break;
+               }
+               /* Write the block back, then read it again for logging */
+               pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
+               rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
+               DBG("write result: %d,", rc);
+               pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
+               rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
+               DBG("read result: %d,", rc);
+               if (rc != 0) {
+                       pmac_low_i2c_close(ui2c);
+                       break;
+               }
+               for (i=0; i<9; i++)
+                       DBG(" %02x", buffer[i]);
+               pmac_low_i2c_close(ui2c);
+               break;
+       }
+}
+
+
+/* Put a Core99/Pangea/Intrepid machine to sleep: power off stray cells,
+ * save GPIO/FCR/DBDMA state, shut the macio down, then put the host
+ * bridge to sleep.  Reversed by core99_wake_up().
+ */
+static int
+core99_sleep(void)
+{
+       struct macio_chip *macio;
+       int i;
+
+       macio = &macio_chips[0];
+       if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+           macio->type != macio_intrepid)
+               return -ENODEV;
+
+       /* We power off the wireless slot in case it was not done
+        * by the driver. We don't power it on automatically however
+        */
+       if (macio->flags & MACIO_FLAG_AIRPORT_ON)
+               core99_airport_enable(macio->of_node, 0, 0);
+
+       /* We power off the FW cable. Should be done by the driver... */
+       if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
+               core99_firewire_enable(NULL, 0, 0);
+               core99_firewire_cable_power(NULL, 0, 0);
+       }
+
+       /* We make sure int. modem is off (in case driver lost it) */
+       if (macio->type == macio_keylargo)
+               core99_modem_enable(macio->of_node, 0, 0);
+       else
+               pangea_modem_enable(macio->of_node, 0, 0);
+
+       /* We make sure the sound is off as well */
+       core99_sound_chip_enable(macio->of_node, 0, 0);
+
+       /*
+        * Save various bits of KeyLargo
+        */
+
+       /* Save the state of the various GPIOs */
+       save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
+       save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
+       for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
+               save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
+       for (i=0; i<KEYLARGO_GPIO_CNT; i++)
+               save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);
+
+       /* Save the FCRs */
+       if (macio->type == macio_keylargo)
+               save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
+       save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
+       save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
+       save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
+       save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
+       save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
+       if (macio->type == macio_pangea || macio->type == macio_intrepid)
+               save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);
+
+       /* Save state & config of DBDMA channels */
+       dbdma_save(macio, save_dbdma);
+
+       /*
+        * Turn off as much as we can
+        */
+       if (macio->type == macio_pangea)
+               pangea_shutdown(macio, 1);
+       else if (macio->type == macio_intrepid)
+               intrepid_shutdown(macio, 1);
+       else if (macio->type == macio_keylargo)
+               keylargo_shutdown(macio, 1);
+
+       /*
+        * Put the host bridge to sleep
+        */
+
+       save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
+       /* Note: do not switch GMAC off, driver does it when necessary, WOL must keep it
+        * enabled !
+        */
+       UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
+              ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
+       udelay(100);
+       UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
+       UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
+       mdelay(10);
+
+       /*
+        * FIXME: A bit of black magic with OpenPIC (don't ask me why)
+        */
+       if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
+               MACIO_BIS(0x506e0, 0x00400000);
+               MACIO_BIS(0x506e0, 0x80000000);
+       }
+       return 0;
+}
+
+/* Reverse of core99_sleep(): wake the host bridge, then restore the
+ * MBCR, FCRs, DBDMA channels, GPIOs and UniNorth clock control from the
+ * state saved before sleeping.
+ */
+static int
+core99_wake_up(void)
+{
+       struct macio_chip *macio;
+       int i;
+
+       macio = &macio_chips[0];
+       if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+           macio->type != macio_intrepid)
+               return -ENODEV;
+
+       /*
+        * Wakeup the host bridge
+        */
+       UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
+       udelay(10);
+       UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
+       udelay(10);
+
+       /*
+        * Restore KeyLargo
+        */
+
+       if (macio->type == macio_keylargo) {
+               MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
+               (void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
+       }
+       /* Each FCR restore is followed by a flush read and settle delay */
+       MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
+       (void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
+       MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
+       (void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
+       MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
+       (void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
+       MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
+       (void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
+       MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
+       (void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
+       if (macio->type == macio_pangea || macio->type == macio_intrepid) {
+               MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
+               (void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
+       }
+
+       dbdma_restore(macio, save_dbdma);
+
+       MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
+       MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
+       for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
+               MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
+       for (i=0; i<KEYLARGO_GPIO_CNT; i++)
+               MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);
+
+       /* FIXME more black magic with OpenPIC ... */
+       if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
+               MACIO_BIC(0x506e0, 0x00400000);
+               MACIO_BIC(0x506e0, 0x80000000);
+       }
+
+       UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
+       udelay(100);
+
+       return 0;
+}
+
+/* PMAC_FTR_SLEEP_STATE dispatcher: param == 1 is the "fake sleep" used
+ * for CPU speed switching; otherwise value 1 sleeps and value 0 wakes,
+ * provided the board is flagged as able to sleep.
+ */
+static long
+core99_sleep_state(struct device_node *node, long param, long value)
+{
+       /* Param == 1 means to enter the "fake sleep" mode that is
+        * used for CPU speed switch
+        */
+       if (param == 1) {
+               if (value == 1) {
+                       UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
+                       UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
+               } else {
+                       UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
+                       udelay(10);
+                       UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
+                       udelay(10);
+               }
+               return 0;
+       }
+       if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+               return -EPERM;
+
+       if (value == 1)
+               return core99_sleep();
+       else if (value == 0)
+               return core99_wake_up();
+       return 0;
+}
+
+#endif /* CONFIG_POWER4 */
+
+/*
+ * PMAC_FTR_DEVICE_CAN_WAKE: a driver reports its device can wake the
+ * machine, which unlocks sleeping on boards flagged as "may sleep".
+ *
+ * Todo: eventually check we are really dealing with on-board
+ * video device ...
+ */
+static long
+generic_dev_can_wake(struct device_node *node, long param, long value)
+{
+       if ((pmac_mb.board_flags & PMAC_MB_MAY_SLEEP) != 0)
+               pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
+
+       return 0;
+}
+
+/*
+ * PMAC_FTR_GET_MB_INFO: expose motherboard identification.  For
+ * PMAC_MB_INFO_NAME, 'value' is really a (const char **) out parameter
+ * ("hack hack hack... but should work").
+ */
+static long generic_get_mb_info(struct device_node *node, long param, long value)
+{
+       switch (param) {
+       case PMAC_MB_INFO_MODEL:
+               return pmac_mb.model_id;
+       case PMAC_MB_INFO_FLAGS:
+               return pmac_mb.board_flags;
+       case PMAC_MB_INFO_NAME:
+               *((const char **)value) = pmac_mb.model_name;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+
+/*
+ * Table definitions
+ */
+
+/* Used on any machine
+ */
+/* Each entry maps a PMAC_FTR_* selector to its handler function */
+static struct feature_table_entry any_features[] = {
+       { PMAC_FTR_GET_MB_INFO,         generic_get_mb_info },
+       { PMAC_FTR_DEVICE_CAN_WAKE,     generic_dev_can_wake },
+       { 0, NULL }     /* terminator */
+};
+
+#ifndef CONFIG_POWER4
+
+/* OHare based motherboards. Currently, we only use these on the
+ * 2400,3400 and 3500 series powerbooks. Some older desktops seem
+ * to have issues with turning on/off those asic cells
+ */
+static struct feature_table_entry ohare_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          ohare_htw_scc_enable },
+       { PMAC_FTR_SWIM3_ENABLE,        ohare_floppy_enable },
+       { PMAC_FTR_MESH_ENABLE,         ohare_mesh_enable },
+       { PMAC_FTR_IDE_ENABLE,          ohare_ide_enable},
+       { PMAC_FTR_IDE_RESET,           ohare_ide_reset},
+       { PMAC_FTR_SLEEP_STATE,         ohare_sleep_state },
+       { 0, NULL }     /* terminator */
+};
+
+/* Heathrow desktop machines (Beige G3).
+ * Separated as some features couldn't be properly tested
+ * and the serial port control bits appear to confuse it.
+ */
+static struct feature_table_entry heathrow_desktop_features[] = {
+       { PMAC_FTR_SWIM3_ENABLE,        heathrow_floppy_enable },
+       { PMAC_FTR_MESH_ENABLE,         heathrow_mesh_enable },
+       { PMAC_FTR_IDE_ENABLE,          heathrow_ide_enable },
+       { PMAC_FTR_IDE_RESET,           heathrow_ide_reset },
+       { PMAC_FTR_BMAC_ENABLE,         heathrow_bmac_enable },
+       { 0, NULL }     /* terminator */
+};
+
+/* Heathrow based laptop, that is the Wallstreet and mainstreet
+ * powerbooks.
+ */
+static struct feature_table_entry heathrow_laptop_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          ohare_htw_scc_enable },
+       { PMAC_FTR_MODEM_ENABLE,        heathrow_modem_enable },
+       { PMAC_FTR_SWIM3_ENABLE,        heathrow_floppy_enable },
+       { PMAC_FTR_MESH_ENABLE,         heathrow_mesh_enable },
+       { PMAC_FTR_IDE_ENABLE,          heathrow_ide_enable },
+       { PMAC_FTR_IDE_RESET,           heathrow_ide_reset },
+       { PMAC_FTR_BMAC_ENABLE,         heathrow_bmac_enable },
+       { PMAC_FTR_SOUND_CHIP_ENABLE,   heathrow_sound_enable },
+       { PMAC_FTR_SLEEP_STATE,         heathrow_sleep_state },
+       { 0, NULL }     /* terminator */
+};
+
+/* Paddington based machines:
+ * the Lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
+ */
+static struct feature_table_entry paddington_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          ohare_htw_scc_enable },
+       { PMAC_FTR_MODEM_ENABLE,        heathrow_modem_enable },
+       { PMAC_FTR_SWIM3_ENABLE,        heathrow_floppy_enable },
+       { PMAC_FTR_MESH_ENABLE,         heathrow_mesh_enable },
+       { PMAC_FTR_IDE_ENABLE,          heathrow_ide_enable },
+       { PMAC_FTR_IDE_RESET,           heathrow_ide_reset },
+       { PMAC_FTR_BMAC_ENABLE,         heathrow_bmac_enable },
+       { PMAC_FTR_SOUND_CHIP_ENABLE,   heathrow_sound_enable },
+       { PMAC_FTR_SLEEP_STATE,         heathrow_sleep_state },
+       { 0, NULL }
+};
+
+/* Core99 & MacRISC 2 machines (all machines released since the
+ * iBook (included), that is all AGP machines, except pangea
+ * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
+ * used on iBook2 & iMac "Flower Power".
+ */
+static struct feature_table_entry core99_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
+       { PMAC_FTR_MODEM_ENABLE,        core99_modem_enable },
+       { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
+       { PMAC_FTR_IDE_RESET,           core99_ide_reset },
+       { PMAC_FTR_GMAC_ENABLE,         core99_gmac_enable },
+       { PMAC_FTR_GMAC_PHY_RESET,      core99_gmac_phy_reset },
+       { PMAC_FTR_SOUND_CHIP_ENABLE,   core99_sound_chip_enable },
+       { PMAC_FTR_AIRPORT_ENABLE,      core99_airport_enable },
+       { PMAC_FTR_USB_ENABLE,          core99_usb_enable },
+       { PMAC_FTR_1394_ENABLE,         core99_firewire_enable },
+       { PMAC_FTR_1394_CABLE_POWER,    core99_firewire_cable_power },
+       { PMAC_FTR_SLEEP_STATE,         core99_sleep_state },
+#ifdef CONFIG_SMP
+       { PMAC_FTR_RESET_CPU,           core99_reset_cpu },
+#endif /* CONFIG_SMP */
+       { PMAC_FTR_READ_GPIO,           core99_read_gpio },
+       { PMAC_FTR_WRITE_GPIO,          core99_write_gpio },
+       { 0, NULL }
+};
+
+/* RackMac (XServe) machines. Same core99 handlers, minus the
+ * modem, sound chip and airport entries.
+ */
+static struct feature_table_entry rackmac_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
+       { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
+       { PMAC_FTR_IDE_RESET,           core99_ide_reset },
+       { PMAC_FTR_GMAC_ENABLE,         core99_gmac_enable },
+       { PMAC_FTR_GMAC_PHY_RESET,      core99_gmac_phy_reset },
+       { PMAC_FTR_USB_ENABLE,          core99_usb_enable },
+       { PMAC_FTR_1394_ENABLE,         core99_firewire_enable },
+       { PMAC_FTR_1394_CABLE_POWER,    core99_firewire_cable_power },
+       { PMAC_FTR_SLEEP_STATE,         core99_sleep_state },
+#ifdef CONFIG_SMP
+       { PMAC_FTR_RESET_CPU,           core99_reset_cpu },
+#endif /* CONFIG_SMP */
+       { PMAC_FTR_READ_GPIO,           core99_read_gpio },
+       { PMAC_FTR_WRITE_GPIO,          core99_write_gpio },
+       { 0, NULL }
+};
+
+/* Pangea features. Mostly the core99 handlers, but with a pangea
+ * specific modem control and no CPU reset entry.
+ */
+static struct feature_table_entry pangea_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
+       { PMAC_FTR_MODEM_ENABLE,        pangea_modem_enable },
+       { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
+       { PMAC_FTR_IDE_RESET,           core99_ide_reset },
+       { PMAC_FTR_GMAC_ENABLE,         core99_gmac_enable },
+       { PMAC_FTR_GMAC_PHY_RESET,      core99_gmac_phy_reset },
+       { PMAC_FTR_SOUND_CHIP_ENABLE,   core99_sound_chip_enable },
+       { PMAC_FTR_AIRPORT_ENABLE,      core99_airport_enable },
+       { PMAC_FTR_USB_ENABLE,          core99_usb_enable },
+       { PMAC_FTR_1394_ENABLE,         core99_firewire_enable },
+       { PMAC_FTR_1394_CABLE_POWER,    core99_firewire_cable_power },
+       { PMAC_FTR_SLEEP_STATE,         core99_sleep_state },
+       { PMAC_FTR_READ_GPIO,           core99_read_gpio },
+       { PMAC_FTR_WRITE_GPIO,          core99_write_gpio },
+       { 0, NULL }
+};
+
+/* Intrepid features. Same as the pangea table plus the AACK delay
+ * control.
+ */
+static struct feature_table_entry intrepid_features[] = {
+       { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
+       { PMAC_FTR_MODEM_ENABLE,        pangea_modem_enable },
+       { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
+       { PMAC_FTR_IDE_RESET,           core99_ide_reset },
+       { PMAC_FTR_GMAC_ENABLE,         core99_gmac_enable },
+       { PMAC_FTR_GMAC_PHY_RESET,      core99_gmac_phy_reset },
+       { PMAC_FTR_SOUND_CHIP_ENABLE,   core99_sound_chip_enable },
+       { PMAC_FTR_AIRPORT_ENABLE,      core99_airport_enable },
+       { PMAC_FTR_USB_ENABLE,          core99_usb_enable },
+       { PMAC_FTR_1394_ENABLE,         core99_firewire_enable },
+       { PMAC_FTR_1394_CABLE_POWER,    core99_firewire_cable_power },
+       { PMAC_FTR_SLEEP_STATE,         core99_sleep_state },
+       { PMAC_FTR_READ_GPIO,           core99_read_gpio },
+       { PMAC_FTR_WRITE_GPIO,          core99_write_gpio },
+       { PMAC_FTR_AACK_DELAY_ENABLE,   intrepid_aack_delay_enable },
+       { 0, NULL }
+};
+
+#else /* CONFIG_POWER4 */
+
+/* G5 features (K2 mac-io / U3 based machines).
+ */
+static struct feature_table_entry g5_features[] = {
+       { PMAC_FTR_GMAC_ENABLE,         g5_gmac_enable },
+       { PMAC_FTR_1394_ENABLE,         g5_fw_enable },
+       { PMAC_FTR_ENABLE_MPIC,         g5_mpic_enable },
+       { PMAC_FTR_GMAC_PHY_RESET,      g5_eth_phy_reset },
+       { PMAC_FTR_SOUND_CHIP_ENABLE,   g5_i2s_enable },
+#ifdef CONFIG_SMP
+       { PMAC_FTR_RESET_CPU,           g5_reset_cpu },
+#endif /* CONFIG_SMP */
+       { PMAC_FTR_READ_GPIO,           core99_read_gpio },
+       { PMAC_FTR_WRITE_GPIO,          core99_write_gpio },
+       { 0, NULL }
+};
+
+#endif /* CONFIG_POWER4 */
+
+/* Table of known motherboards. Each entry gives the device-tree
+ * "model" string, a human readable name, the PMAC_TYPE_* model id,
+ * the feature table to use and the PMAC_MB_* board flags.
+ * probe_motherboard() matches first on an exact "model" property,
+ * then via machine_is_compatible().
+ */
+static struct pmac_mb_def pmac_mb_defs[] = {
+#ifndef CONFIG_POWER4
+       /*
+        * Desktops
+        */
+
+       {       "AAPL,8500",                    "PowerMac 8500/8600",
+               PMAC_TYPE_PSURGE,               NULL,
+               0
+       },
+       {       "AAPL,9500",                    "PowerMac 9500/9600",
+               PMAC_TYPE_PSURGE,               NULL,
+               0
+       },
+       {       "AAPL,7200",                    "PowerMac 7200",
+               PMAC_TYPE_PSURGE,               NULL,
+               0
+       },
+       {       "AAPL,7300",                    "PowerMac 7200/7300",
+               PMAC_TYPE_PSURGE,               NULL,
+               0
+       },
+       {       "AAPL,7500",                    "PowerMac 7500",
+               PMAC_TYPE_PSURGE,               NULL,
+               0
+       },
+       {       "AAPL,ShinerESB",               "Apple Network Server",
+               PMAC_TYPE_ANS,                  NULL,
+               0
+       },
+       {       "AAPL,e407",                    "Alchemy",
+               PMAC_TYPE_ALCHEMY,              NULL,
+               0
+       },
+       {       "AAPL,e411",                    "Gazelle",
+               PMAC_TYPE_GAZELLE,              NULL,
+               0
+       },
+       {       "AAPL,Gossamer",                "PowerMac G3 (Gossamer)",
+               PMAC_TYPE_GOSSAMER,             heathrow_desktop_features,
+               0
+       },
+       {       "AAPL,PowerMac G3",             "PowerMac G3 (Silk)",
+               PMAC_TYPE_SILK,                 heathrow_desktop_features,
+               0
+       },
+       {       "PowerMac1,1",                  "Blue&White G3",
+               PMAC_TYPE_YOSEMITE,             paddington_features,
+               0
+       },
+       {       "PowerMac1,2",                  "PowerMac G4 PCI Graphics",
+               PMAC_TYPE_YIKES,                paddington_features,
+               0
+       },
+       {       "PowerMac2,1",                  "iMac FireWire",
+               PMAC_TYPE_FW_IMAC,              core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+       },
+       {       "PowerMac2,2",                  "iMac FireWire",
+               PMAC_TYPE_FW_IMAC,              core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+       },
+       {       "PowerMac3,1",                  "PowerMac G4 AGP Graphics",
+               PMAC_TYPE_SAWTOOTH,             core99_features,
+               PMAC_MB_OLD_CORE99
+       },
+       {       "PowerMac3,2",                  "PowerMac G4 AGP Graphics",
+               PMAC_TYPE_SAWTOOTH,             core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+       },
+       {       "PowerMac3,3",                  "PowerMac G4 AGP Graphics",
+               PMAC_TYPE_SAWTOOTH,             core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+       },
+       {       "PowerMac3,4",                  "PowerMac G4 Silver",
+               PMAC_TYPE_QUICKSILVER,          core99_features,
+               PMAC_MB_MAY_SLEEP
+       },
+       {       "PowerMac3,5",                  "PowerMac G4 Silver",
+               PMAC_TYPE_QUICKSILVER,          core99_features,
+               PMAC_MB_MAY_SLEEP
+       },
+       {       "PowerMac3,6",                  "PowerMac G4 Windtunnel",
+               PMAC_TYPE_WINDTUNNEL,           core99_features,
+               PMAC_MB_MAY_SLEEP,
+       },
+       {       "PowerMac4,1",                  "iMac \"Flower Power\"",
+               PMAC_TYPE_PANGEA_IMAC,          pangea_features,
+               PMAC_MB_MAY_SLEEP
+       },
+       {       "PowerMac4,2",                  "Flat panel iMac",
+               PMAC_TYPE_FLAT_PANEL_IMAC,      pangea_features,
+               PMAC_MB_CAN_SLEEP
+       },
+       {       "PowerMac4,4",                  "eMac",
+               PMAC_TYPE_EMAC,                 core99_features,
+               PMAC_MB_MAY_SLEEP
+       },
+       {       "PowerMac5,1",                  "PowerMac G4 Cube",
+               PMAC_TYPE_CUBE,                 core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+       },
+       {       "PowerMac6,1",                  "Flat panel iMac",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP,
+       },
+       {       "PowerMac6,3",                  "Flat panel iMac",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP,
+       },
+       {       "PowerMac6,4",                  "eMac",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP,
+       },
+       {       "PowerMac10,1",                 "Mac mini",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
+       },
+       {       "iMac,1",                       "iMac (first generation)",
+               PMAC_TYPE_ORIG_IMAC,            paddington_features,
+               0
+       },
+
+       /*
+        * XServes
+        */
+
+       {       "RackMac1,1",                   "XServe",
+               PMAC_TYPE_RACKMAC,              rackmac_features,
+               0,
+       },
+       {       "RackMac1,2",                   "XServe rev. 2",
+               PMAC_TYPE_RACKMAC,              rackmac_features,
+               0,
+       },
+
+       /*
+        * Laptops
+        */
+
+       {       "AAPL,3400/2400",               "PowerBook 3400",
+               PMAC_TYPE_HOOPER,               ohare_features,
+               PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+       },
+       {       "AAPL,3500",                    "PowerBook 3500",
+               PMAC_TYPE_KANGA,                ohare_features,
+               PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+       },
+       {       "AAPL,PowerBook1998",           "PowerBook Wallstreet",
+               PMAC_TYPE_WALLSTREET,           heathrow_laptop_features,
+               PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+       },
+       {       "PowerBook1,1",                 "PowerBook 101 (Lombard)",
+               PMAC_TYPE_101_PBOOK,            paddington_features,
+               PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+       },
+       {       "PowerBook2,1",                 "iBook (first generation)",
+               PMAC_TYPE_ORIG_IBOOK,           core99_features,
+               PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+       },
+       {       "PowerBook2,2",                 "iBook FireWire",
+               PMAC_TYPE_FW_IBOOK,             core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
+               PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+       },
+       {       "PowerBook3,1",                 "PowerBook Pismo",
+               PMAC_TYPE_PISMO,                core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
+               PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+       },
+       {       "PowerBook3,2",                 "PowerBook Titanium",
+               PMAC_TYPE_TITANIUM,             core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook3,3",                 "PowerBook Titanium II",
+               PMAC_TYPE_TITANIUM2,            core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook3,4",                 "PowerBook Titanium III",
+               PMAC_TYPE_TITANIUM3,            core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook3,5",                 "PowerBook Titanium IV",
+               PMAC_TYPE_TITANIUM4,            core99_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook4,1",                 "iBook 2",
+               PMAC_TYPE_IBOOK2,               pangea_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook4,2",                 "iBook 2",
+               PMAC_TYPE_IBOOK2,               pangea_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook4,3",                 "iBook 2 rev. 2",
+               PMAC_TYPE_IBOOK2,               pangea_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+       },
+       {       "PowerBook5,1",                 "PowerBook G4 17\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook5,2",                 "PowerBook G4 15\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook5,3",                 "PowerBook G4 17\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook5,4",                 "PowerBook G4 15\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook5,5",                 "PowerBook G4 17\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook5,6",                 "PowerBook G4 15\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook5,7",                 "PowerBook G4 17\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,1",                 "PowerBook G4 12\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,2",                 "PowerBook G4",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,3",                 "iBook G4",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,4",                 "PowerBook G4 12\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,5",                 "iBook G4",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,7",                 "iBook G4",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+       {       "PowerBook6,8",                 "PowerBook G4 12\"",
+               PMAC_TYPE_UNKNOWN_INTREPID,     intrepid_features,
+               PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+       },
+#else /* CONFIG_POWER4 */
+       {       "PowerMac7,2",                  "PowerMac G5",
+               PMAC_TYPE_POWERMAC_G5,          g5_features,
+               0,
+       },
+#ifdef CONFIG_PPC64
+       {       "PowerMac7,3",                  "PowerMac G5",
+               PMAC_TYPE_POWERMAC_G5,          g5_features,
+               0,
+       },
+       {       "PowerMac8,1",                  "iMac G5",
+               PMAC_TYPE_IMAC_G5,              g5_features,
+               0,
+       },
+       {       "PowerMac9,1",                  "PowerMac G5",
+               PMAC_TYPE_POWERMAC_G5_U3L,      g5_features,
+               0,
+       },
+       {       "RackMac3,1",                   "XServe G5",
+               PMAC_TYPE_XSERVE_G5,            g5_features,
+               0,
+       },
+#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_POWER4 */
+};
+
+/*
+ * The toplevel feature_call callback.
+ *
+ * Resolves @selector first against the per-motherboard feature table
+ * (pmac_mb.features), then against the global any_features table, and
+ * invokes the handler with the (node, param, value) triple pulled from
+ * the variadic arguments. Returns -ENODEV when no handler matches.
+ */
+long pmac_do_feature_call(unsigned int selector, ...)
+{
+       struct device_node *node;
+       long param, value;
+       int idx;
+       feature_call handler = NULL;
+       va_list ap;
+
+       /* Board-specific table takes precedence */
+       if (pmac_mb.features) {
+               for (idx = 0; pmac_mb.features[idx].function; idx++) {
+                       if (pmac_mb.features[idx].selector == selector) {
+                               handler = pmac_mb.features[idx].function;
+                               break;
+                       }
+               }
+       }
+       /* Fall back to the table shared by all machines */
+       if (handler == NULL) {
+               for (idx = 0; any_features[idx].function; idx++) {
+                       if (any_features[idx].selector == selector) {
+                               handler = any_features[idx].function;
+                               break;
+                       }
+               }
+       }
+       if (handler == NULL)
+               return -ENODEV;
+
+       va_start(ap, selector);
+       node = (struct device_node *)va_arg(ap, void *);
+       param = va_arg(ap, long);
+       value = va_arg(ap, long);
+       va_end(ap);
+
+       return handler(node, param, value);
+}
+
+/* Identify the motherboard from the device-tree and fill in pmac_mb
+ * (model id/name, feature table, board flags). Returns 0 on success,
+ * -ENODEV when no known mac-io chip can be matched either.
+ */
+static int __init probe_motherboard(void)
+{
+       int i;
+       struct macio_chip *macio = &macio_chips[0];
+       const char *model = NULL;
+       struct device_node *dt;
+
+       /* Lookup known motherboard type in device-tree. First try an
+        * exact match on the "model" property, then try a "compatible"
+        * match if none is found.
+        */
+       dt = find_devices("device-tree");
+       if (dt != NULL)
+               model = (const char *) get_property(dt, "model", NULL);
+       for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
+           if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
+               pmac_mb = pmac_mb_defs[i];
+               goto found;
+           }
+       }
+       for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
+           if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
+               pmac_mb = pmac_mb_defs[i];
+               goto found;
+           }
+       }
+
+       /* Fallback to selection depending on mac-io chip type */
+       switch(macio->type) {
+#ifndef CONFIG_POWER4
+           case macio_grand_central:
+               pmac_mb.model_id = PMAC_TYPE_PSURGE;
+               pmac_mb.model_name = "Unknown PowerSurge";
+               break;
+           case macio_ohare:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
+               pmac_mb.model_name = "Unknown OHare-based";
+               break;
+           case macio_heathrow:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
+               pmac_mb.model_name = "Unknown Heathrow-based";
+               pmac_mb.features = heathrow_desktop_features;
+               break;
+           case macio_paddington:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
+               pmac_mb.model_name = "Unknown Paddington-based";
+               pmac_mb.features = paddington_features;
+               break;
+           case macio_keylargo:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
+               pmac_mb.model_name = "Unknown Keylargo-based";
+               pmac_mb.features = core99_features;
+               break;
+           case macio_pangea:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
+               pmac_mb.model_name = "Unknown Pangea-based";
+               pmac_mb.features = pangea_features;
+               break;
+           case macio_intrepid:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
+               pmac_mb.model_name = "Unknown Intrepid-based";
+               pmac_mb.features = intrepid_features;
+               break;
+#else /* CONFIG_POWER4 */
+       case macio_keylargo2:
+               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
+               pmac_mb.model_name = "Unknown K2-based";
+               pmac_mb.features = g5_features;
+               break;
+#endif /* CONFIG_POWER4 */
+       default:
+               return -ENODEV;
+       }
+found:
+#ifndef CONFIG_POWER4
+       /* Fixup Hooper vs. Comet: read the machine-id register at
+        * 0xf3000034; bit 0x20000000 set means a Comet board.
+        */
+       if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
+               u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
+               if (!mach_id_ptr)
+                       return -ENODEV;
+               /* Here, I used to disable the media-bay on comet. It
+                * appears this is wrong, the floppy connector is actually
+                * a kind of media-bay and works with the current driver.
+                */
+               if (__raw_readl(mach_id_ptr) & 0x20000000UL)
+                       pmac_mb.model_id = PMAC_TYPE_COMET;
+               iounmap(mach_id_ptr);
+       }
+#endif /* CONFIG_POWER4 */
+
+#ifdef CONFIG_6xx
+       /* Set default value of powersave_nap on machines that support it.
+        * It appears that uninorth rev 3 has a problem with it, we don't
+        * enable it on those. In theory, the flush-on-lock property is
+        * supposed to be set when not supported, but I'm not very confident
+        * that all Apple OF revs did it properly, I do it the paranoid way.
+        */
+       while (uninorth_base && uninorth_rev > 3) {
+               struct device_node *np = find_path_device("/cpus");
+               if (!np || !np->child) {
+                       printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
+                       break;
+               }
+               np = np->child;
+               /* Nap mode not supported on SMP */
+               if (np->sibling)
+                       break;
+               /* Nap mode not supported if flush-on-lock property is present */
+               if (get_property(np, "flush-on-lock", NULL))
+                       break;
+               powersave_nap = 1;
+               printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
+               break;
+       }
+
+       /* On CPUs that support it (750FX), lowspeed by default during
+        * NAP mode
+        */
+       powersave_lowspeed = 1;
+#endif /* CONFIG_6xx */
+#ifdef CONFIG_POWER4
+       powersave_nap = 1;
+#endif
+       /* Check for "mobile" machine */
+       if (model && (strncmp(model, "PowerBook", 9) == 0
+                  || strncmp(model, "iBook", 5) == 0))
+               pmac_mb.board_flags |= PMAC_MB_MOBILE;
+
+
+       printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
+       return 0;
+}
+
+/* Initialize the Core99 UniNorth host bridge and memory controller.
+ * Locates the "uni-n" (Core99) or "u3" (G5) node, maps its registers
+ * into uninorth_base and applies Apple's arbiter/timeout tweaks.
+ */
+static void __init probe_uninorth(void)
+{
+       unsigned long actrl;
+
+       /* Locate core99 Uni-N */
+       uninorth_node = of_find_node_by_name(NULL, "uni-n");
+       /* Locate G5 u3 */
+       if (uninorth_node == NULL) {
+               uninorth_node = of_find_node_by_name(NULL, "u3");
+               uninorth_u3 = 1;
+       }
+       if (uninorth_node && uninorth_node->n_addrs > 0) {
+               unsigned long address = uninorth_node->addrs[0].address;
+               uninorth_base = ioremap(address, 0x40000);
+               uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
+               if (uninorth_u3)
+                       u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
+       } else
+               uninorth_node = NULL;
+
+       if (!uninorth_node)
+               return;
+
+       printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
+              uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
+       printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
+
+       /* Set the arbiter QAck delay according to what Apple does
+        */
+       if (uninorth_rev < 0x11) {
+               actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
+               actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
+                       UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
+               UN_OUT(UNI_N_ARB_CTRL, actrl);
+       }
+
+       /* Some more magic as done by them in recent MacOS X on UniNorth
+        * revs 1.5 to 2.0 and Pangea. Seem to toggle the UniN Maxbus/PCI
+        * memory timeout
+        */
+       if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
+               UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
+}
+
+/* Locate one mac-io chip by node @name (optionally filtered on the
+ * "compatible" string @compat), map its registers and record it in
+ * macio_chips[]. A keylargo chip is further discriminated into
+ * Pangea/Intrepid using the PCI "device-id" property.
+ */
+static void __init probe_one_macio(const char *name, const char *compat, int type)
+{
+       struct device_node*     node;
+       int                     i;
+       volatile u32 __iomem *  base;
+       u32*                    revp;
+
+       node = find_devices(name);
+       if (!node || !node->n_addrs)
+               return;
+       if (compat)
+               do {
+                       if (device_is_compatible(node, compat))
+                               break;
+                       node = node->next;
+               } while (node);
+       if (!node)
+               return;
+       /* Find a free slot, bailing out if this node is already known */
+       for(i=0; i<MAX_MACIO_CHIPS; i++) {
+               if (!macio_chips[i].of_node)
+                       break;
+               if (macio_chips[i].of_node == node)
+                       return;
+       }
+       if (i >= MAX_MACIO_CHIPS) {
+               printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
+               printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
+               return;
+       }
+       base = ioremap(node->addrs[0].address, node->addrs[0].size);
+       if (!base) {
+               printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
+               return;
+       }
+       if (type == macio_keylargo) {
+               u32 *did = (u32 *)get_property(node, "device-id", NULL);
+               /* get_property() may return NULL, don't dereference blindly */
+               if (did && *did == 0x00000025)
+                       type = macio_pangea;
+               if (did && *did == 0x0000003e)
+                       type = macio_intrepid;
+       }
+       macio_chips[i].of_node  = node;
+       macio_chips[i].type     = type;
+       macio_chips[i].base     = base;
+       /* Both SCC channels default to enabled (was SCCB OR'ed twice) */
+       macio_chips[i].flags    = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON;
+       macio_chips[i].name     = macio_names[type];
+       revp = (u32 *)get_property(node, "revision-id", NULL);
+       if (revp)
+               macio_chips[i].rev = *revp;
+       printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
+               macio_names[type], macio_chips[i].rev, macio_chips[i].base);
+}
+
+/* Probe every known flavour of mac-io controller and normalize the
+ * macio_chips[] ordering so the "main" chip sits at index 0.
+ * Returns 0 when at least one chip was found, -ENODEV otherwise.
+ */
+static int __init
+probe_macios(void)
+{
+       /* Warning, ordering is important */
+       probe_one_macio("gc", NULL, macio_grand_central);
+       probe_one_macio("ohare", NULL, macio_ohare);
+       probe_one_macio("pci106b,7", NULL, macio_ohareII);
+       probe_one_macio("mac-io", "keylargo", macio_keylargo);
+       probe_one_macio("mac-io", "paddington", macio_paddington);
+       probe_one_macio("mac-io", "gatwick", macio_gatwick);
+       probe_one_macio("mac-io", "heathrow", macio_heathrow);
+       probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
+
+       /* If a secondary chip (gatwick/ohareII) landed in slot 0 ahead of
+        * its primary (heathrow/ohare), swap them so the main chip comes
+        * first. The two conditions are mutually exclusive, so a single
+        * combined test is equivalent to testing them in sequence.
+        */
+       if ((macio_chips[0].type == macio_gatwick &&
+            macio_chips[1].type == macio_heathrow) ||
+           (macio_chips[0].type == macio_ohareII &&
+            macio_chips[1].type == macio_ohare)) {
+               struct macio_chip swap = macio_chips[0];
+
+               macio_chips[0] = macio_chips[1];
+               macio_chips[1] = swap;
+       }
+       macio_chips[0].lbus.index = 0;
+       macio_chips[1].lbus.index = 1;
+
+       if (macio_chips[0].of_node == NULL)
+               return -ENODEV;
+       return 0;
+}
+
+/* Shut down one SCC serial port at boot. The port kind (async vs.
+ * IrDA) and the presence of a modem are guessed from the node's
+ * "AAPL,connector" / "slot-names" properties and "cobalt" compatible.
+ */
+static void __init
+initial_serial_shutdown(struct device_node *np)
+{
+       int len;
+       struct slot_names_prop {
+               int     count;
+               char    name[1];
+       } *slots;
+       char *connector;
+       int scc_type = PMAC_SCC_ASYNC;
+       int has_modem = 0;
+
+       slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
+       connector = get_property(np, "AAPL,connector", &len);
+       if (connector != NULL && strcmp(connector, "infrared") == 0) {
+               scc_type = PMAC_SCC_IRDA;
+       } else if (device_is_compatible(np, "cobalt")) {
+               has_modem = 1;
+       } else if (slots != NULL && slots->count > 0) {
+               if (strcmp(slots->name, "IrDA") == 0)
+                       scc_type = PMAC_SCC_IRDA;
+               else if (strcmp(slots->name, "Modem") == 0)
+                       has_modem = 1;
+       }
+       if (has_modem)
+               pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
+       pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, scc_type, 0);
+}
+
+/*
+ * One-time hardware feature setup run from pmac_feature_init().
+ * Enables cells that PCI probing needs to see (GMAC ethernet, FireWire,
+ * ATA-100) and switches off devices whose drivers will re-enable them
+ * later (airport, sound, modem/serial ports).
+ */
+static void __init
+set_initial_features(void)
+{
+       struct device_node *np;
+
+       /* That hack appears to be necessary for some StarMax motherboards
+        * but I'm not too sure it was audited for side-effects on other
+        * ohare based machines...
+        * Since I still have difficulties figuring the right way to
+        * differentiate them all and since that hack was there for a long
+        * time, I'll keep it around
+        */
+       if (macio_chips[0].type == macio_ohare && !find_devices("via-pmu")) {
+               /* NOTE(review): 'macio' looks unused here, but the MACIO_*
+                * macros presumably reference it implicitly -- confirm
+                * against their definitions. */
+               struct macio_chip *macio = &macio_chips[0];
+               MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
+       } else if (macio_chips[0].type == macio_ohare) {
+               struct macio_chip *macio = &macio_chips[0];
+               MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+       } else if (macio_chips[1].type == macio_ohare) {
+               struct macio_chip *macio = &macio_chips[1];
+               MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+       }
+
+#ifdef CONFIG_POWER4
+       if (macio_chips[0].type == macio_keylargo2) {
+#ifndef CONFIG_SMP
+               /* On SMP machines running UP, we have the second CPU eating
+                * bus cycles. We need to take it off the bus. This is done
+                * from pmac_smp for SMP kernels running on one CPU
+                */
+               np = of_find_node_by_type(NULL, "cpu");
+               if (np != NULL)
+                       np = of_find_node_by_type(np, "cpu");
+               /* Only act if a *second* cpu node exists (dual-CPU machine) */
+               if (np != NULL) {
+                       g5_phy_disable_cpu1();
+                       of_node_put(np);
+               }
+#endif /* CONFIG_SMP */
+               /* Enable GMAC for now for PCI probing. It will be disabled
+                * later on after PCI probe
+                */
+               np = of_find_node_by_name(NULL, "ethernet");
+               while(np) {
+                       if (device_is_compatible(np, "K2-GMAC"))
+                               g5_gmac_enable(np, 0, 1);
+                       np = of_find_node_by_name(np, "ethernet");
+               }
+
+               /* Enable FW before PCI probe. Will be disabled later on
+                * Note: We should have a better way to check that we are
+                * dealing with uninorth internal cell and not a PCI cell
+                * on the external PCI. The code below works though.
+                */
+               np = of_find_node_by_name(NULL, "firewire");
+               while(np) {
+                       if (device_is_compatible(np, "pci106b,5811")) {
+                               macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
+                               g5_fw_enable(np, 0, 1);
+                       }
+                       np = of_find_node_by_name(np, "firewire");
+               }
+       }
+#else /* CONFIG_POWER4 */
+
+       if (macio_chips[0].type == macio_keylargo ||
+           macio_chips[0].type == macio_pangea ||
+           macio_chips[0].type == macio_intrepid) {
+               /* Enable GMAC for now for PCI probing. It will be disabled
+                * later on after PCI probe
+                */
+               np = of_find_node_by_name(NULL, "ethernet");
+               while(np) {
+                       if (np->parent
+                           && device_is_compatible(np->parent, "uni-north")
+                           && device_is_compatible(np, "gmac"))
+                               core99_gmac_enable(np, 0, 1);
+                       np = of_find_node_by_name(np, "ethernet");
+               }
+
+               /* Enable FW before PCI probe. Will be disabled later on
+                * Note: We should have a better way to check that we are
+                * dealing with uninorth internal cell and not a PCI cell
+                * on the external PCI. The code below works though.
+                */
+               np = of_find_node_by_name(NULL, "firewire");
+               while(np) {
+                       if (np->parent
+                           && device_is_compatible(np->parent, "uni-north")
+                           && (device_is_compatible(np, "pci106b,18") ||
+                               device_is_compatible(np, "pci106b,30") ||
+                               device_is_compatible(np, "pci11c1,5811"))) {
+                               macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
+                               core99_firewire_enable(np, 0, 1);
+                       }
+                       np = of_find_node_by_name(np, "firewire");
+               }
+
+               /* Enable ATA-100 before PCI probe. */
+               np = of_find_node_by_name(NULL, "ata-6");
+               while(np) {
+                       if (np->parent
+                           && device_is_compatible(np->parent, "uni-north")
+                           && device_is_compatible(np, "kauai-ata")) {
+                               core99_ata100_enable(np, 1);
+                       }
+                       np = of_find_node_by_name(np, "ata-6");
+               }
+
+               /* Switch airport off */
+               np = find_devices("radio");
+               while(np) {
+                       if (np && np->parent == macio_chips[0].of_node) {
+                               macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
+                               core99_airport_enable(np, 0, 0);
+                       }
+                       np = np->next;
+               }
+       }
+
+       /* On all machines that support sound PM, switch sound off */
+       if (macio_chips[0].of_node)
+               pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
+                       macio_chips[0].of_node, 0, 0);
+
+       /* While on some desktop G3s, we turn it back on */
+       if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
+               && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
+                   pmac_mb.model_id == PMAC_TYPE_SILK)) {
+               struct macio_chip *macio = &macio_chips[0];
+               MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+               MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
+       }
+
+       /* Some machine models need the clock chip to be properly setup for
+        * clock spreading now. This should be a platform function but we
+        * don't do these at the moment
+        */
+       pmac_tweak_clock_spreading(1);
+
+#endif /* CONFIG_POWER4 */
+
+       /* On all machines, switch modem & serial ports off */
+       np = find_devices("ch-a");
+       while(np) {
+               initial_serial_shutdown(np);
+               np = np->next;
+       }
+       np = find_devices("ch-b");
+       while(np) {
+               initial_serial_shutdown(np);
+               np = np->next;
+       }
+}
+
+/*
+ * Platform feature-control entry point: detect the UniNorth memory
+ * controller and the mac-io chip(s), bring up low-level i2c, identify
+ * the motherboard, then apply the initial feature state.  Bails out
+ * early (with a warning) if no mac-io chip is found.
+ */
+void __init
+pmac_feature_init(void)
+{
+       /* Detect the UniNorth memory controller */
+       probe_uninorth();
+
+       /* Probe mac-io controllers */
+       if (probe_macios()) {
+               printk(KERN_WARNING "No mac-io chip found\n");
+               return;
+       }
+
+       /* Setup low-level i2c stuffs */
+       pmac_init_low_i2c();
+
+       /* Probe machine type */
+       if (probe_motherboard())
+               printk(KERN_WARNING "Unknown PowerMac !\n");
+
+       /* Set some initial features (turn off some chips that will
+        * be later turned on)
+        */
+       set_initial_features();
+}
+
+/*
+ * Late (device_initcall) hook.  The OF-resource reservation it used to
+ * perform is currently compiled out (#if 0), so it is a no-op that
+ * always succeeds.
+ */
+int __init pmac_feature_late_init(void)
+{
+#if 0
+       struct device_node *np;
+
+       /* Request some resources late */
+       if (uninorth_node)
+               request_OF_resource(uninorth_node, 0, NULL);
+       np = find_devices("hammerhead");
+       if (np)
+               request_OF_resource(np, 0, NULL);
+       np = find_devices("interrupt-controller");
+       if (np)
+               request_OF_resource(np, 0, NULL);
+#endif
+       return 0;
+}
+
+device_initcall(pmac_feature_late_init);
+
+#if 0
+/*
+ * Decode and print a HyperTransport link's frequency (bits 8..11 of
+ * 'frq', looked up in MHz) and in/out widths (bits 24..30 of 'cfg',
+ * looked up in bits).  An unmapped frequency code prints a warning.
+ */
+static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
+{
+       int     freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
+       int     bits[8] = { 8,16,0,32,2,4,0,0 };
+       int     freq = (frq >> 8) & 0xf;
+
+       if (freqs[freq] == 0)
+               printk("%s: Unknown HT link frequency %x\n", name, freq);
+       else
+               printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
+                      name, freqs[freq],
+                      bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
+}
+
+/*
+ * Dump the U3 HyperTransport link settings and, if a PCI-X bridge is
+ * found and can be matched to a PCI bus/devfn and host controller, the
+ * bridge's HT uplink/downlink settings read via early config-space
+ * accesses (offsets 0xc4/0xcc and 0xc8/0xd0).
+ */
+void __init pmac_check_ht_link(void)
+{
+       u32     ufreq, freq, ucfg, cfg;
+       struct device_node *pcix_node;
+       u8      px_bus, px_devfn;
+       struct pci_controller *px_hose;
+
+       /* Dummy read, then capture the U3 link config/frequency registers */
+       (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
+       ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
+       ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
+       dump_HT_speeds("U3 HyperTransport", cfg, freq);
+
+       pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
+       if (pcix_node == NULL) {
+               printk("No PCI-X bridge found\n");
+               return;
+       }
+       if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
+               printk("PCI-X bridge found but not matched to pci\n");
+               return;
+       }
+       px_hose = pci_find_hose_for_OF_device(pcix_node);
+       if (px_hose == NULL) {
+               printk("PCI-X bridge found but not matched to host\n");
+               return;
+       }       
+       early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
+       early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
+       dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
+       early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
+       early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
+       dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
+}
+#endif /* 0 */
+
+/*
+ * Early video resume hook
+ */
+
+static void (*pmac_early_vresume_proc)(void *data);
+static void *pmac_early_vresume_data;
+
+/*
+ * Register (or replace) the early video-resume callback and its opaque
+ * data pointer.  Only meaningful on PowerMac; other machines return
+ * early.  Preemption is disabled around the two stores -- presumably so
+ * pmac_call_early_video_resume() never sees a proc/data mismatch; the
+ * pair is not otherwise atomic (NOTE(review): confirm caller context).
+ */
+void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
+{
+       if (_machine != _MACH_Pmac)
+               return;
+       preempt_disable();
+       pmac_early_vresume_proc = proc;
+       pmac_early_vresume_data = data;
+       preempt_enable();
+}
+EXPORT_SYMBOL(pmac_set_early_video_resume);
+
+/* Invoke the registered early video-resume hook, if any. */
+void pmac_call_early_video_resume(void)
+{
+       if (pmac_early_vresume_proc)
+               pmac_early_vresume_proc(pmac_early_vresume_data);
+}
+
+/*
+ * AGP related suspend/resume code
+ */
+
+static struct pci_dev *pmac_agp_bridge;
+static int (*pmac_agp_suspend)(struct pci_dev *bridge);
+static int (*pmac_agp_resume)(struct pci_dev *bridge);
+
+/*
+ * Register or unregister AGP bridge suspend/resume callbacks.
+ * With a non-NULL suspend or resume, records bridge + callbacks.
+ * With both NULL, unregisters -- but only if 'bridge' matches the
+ * currently registered one (stale unregister calls are ignored).
+ */
+void pmac_register_agp_pm(struct pci_dev *bridge,
+                                int (*suspend)(struct pci_dev *bridge),
+                                int (*resume)(struct pci_dev *bridge))
+{
+       if (suspend || resume) {
+               pmac_agp_bridge = bridge;
+               pmac_agp_suspend = suspend;
+               pmac_agp_resume = resume;
+               return;
+       }
+       if (bridge != pmac_agp_bridge)
+               return;
+       pmac_agp_suspend = pmac_agp_resume = NULL;
+       return;
+}
+EXPORT_SYMBOL(pmac_register_agp_pm);
+
+/*
+ * Suspend the AGP bridge on behalf of a card, but only when a bridge
+ * and suspend hook are registered and the card sits on the bridge's bus.
+ */
+void pmac_suspend_agp_for_card(struct pci_dev *dev)
+{
+       if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
+               return;
+       if (pmac_agp_bridge->bus != dev->bus)
+               return;
+       pmac_agp_suspend(pmac_agp_bridge);
+}
+EXPORT_SYMBOL(pmac_suspend_agp_for_card);
+
+/* Counterpart of pmac_suspend_agp_for_card(): resume the AGP bridge. */
+void pmac_resume_agp_for_card(struct pci_dev *dev)
+{
+       if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
+               return;
+       if (pmac_agp_bridge->bus != dev->bus)
+               return;
+       pmac_agp_resume(pmac_agp_bridge);
+}
+EXPORT_SYMBOL(pmac_resume_agp_for_card);
similarity index 60%
rename from arch/ppc64/kernel/pmac_nvram.c
rename to arch/powerpc/platforms/powermac/nvram.c
index e32a902..4042e2f 100644 (file)
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/nvram.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
 #include <linux/bootmem.h>
 #include <linux/completion.h>
 #include <linux/spinlock.h>
@@ -72,20 +75,38 @@ struct core99_header {
 /*
  * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
  */
+static int nvram_naddrs;
 static volatile unsigned char *nvram_data;
+static int is_core_99;
 static int core99_bank = 0;
+static int nvram_partitions[3];
 // XXX Turn that into a sem
 static DEFINE_SPINLOCK(nv_lock);
 
+extern int pmac_newworld;
 extern int system_running;
 
 static int (*core99_write_bank)(int bank, u8* datas);
 static int (*core99_erase_bank)(int bank);
 
-static char *nvram_image __pmacdata;
+static char *nvram_image;
 
 
-static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
+/* Read a byte from the in-RAM NVRAM image; 0xff if no image allocated. */
+static unsigned char core99_nvram_read_byte(int addr)
+{
+       if (nvram_image == NULL)
+               return 0xff;
+       return nvram_image[addr];
+}
+
+/* Write a byte into the in-RAM NVRAM image (flushed later by sync). */
+static void core99_nvram_write_byte(int addr, unsigned char val)
+{
+       if (nvram_image == NULL)
+               return;
+       nvram_image[addr] = val;
+}
+
+static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
 {
        int i;
 
@@ -103,7 +124,7 @@ static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
        return count;
 }
 
-static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
+static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
 {
        int i;
 
@@ -121,14 +142,95 @@ static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
        return count;
 }
 
-static ssize_t __pmac core99_nvram_size(void)
+static ssize_t core99_nvram_size(void)
 {
        if (nvram_image == NULL)
                return -ENODEV;
        return NVRAM_SIZE;
 }
 
-static u8 __pmac chrp_checksum(struct chrp_header* hdr)
+#ifdef CONFIG_PPC32
+static volatile unsigned char *nvram_addr;
+static int nvram_mult;
+
+/* Directly-mapped NVRAM: one MMIO byte per nvram_mult bytes of window. */
+static unsigned char direct_nvram_read_byte(int addr)
+{
+       return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
+}
+
+/* Directly-mapped NVRAM write, mirroring direct_nvram_read_byte(). */
+static void direct_nvram_write_byte(int addr, unsigned char val)
+{
+       out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
+}
+
+
+/*
+ * Banked ("indirect") NVRAM read: latch the bank (addr >> 5) into the
+ * address register, then read the byte at offset (addr & 0x1f) << 4.
+ * nv_lock keeps the latch+access pair atomic.
+ */
+static unsigned char indirect_nvram_read_byte(int addr)
+{
+       unsigned char val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&nv_lock, flags);
+       out_8(nvram_addr, addr >> 5);
+       val = in_8(&nvram_data[(addr & 0x1f) << 4]);
+       spin_unlock_irqrestore(&nv_lock, flags);
+
+       return val;
+}
+
+/* Banked NVRAM write; same latch-then-access protocol as the read. */
+static void indirect_nvram_write_byte(int addr, unsigned char val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&nv_lock, flags);
+       out_8(nvram_addr, addr >> 5);
+       out_8(&nvram_data[(addr & 0x1f) << 4], val);
+       spin_unlock_irqrestore(&nv_lock, flags);
+}
+
+
+#ifdef CONFIG_ADB_PMU
+
+/* PMU request completion callback: wake the waiter stashed in req->arg. */
+static void pmu_nvram_complete(struct adb_request *req)
+{
+       if (req->arg)
+               complete((struct completion *)req->arg);
+}
+
+/*
+ * Read an NVRAM byte through the PMU.  Sleeps on a completion when the
+ * system is fully running, otherwise (early boot / suspend paths) busy
+ * polls the PMU.  Returns 0xff if the PMU request cannot be queued.
+ */
+static unsigned char pmu_nvram_read_byte(int addr)
+{
+       struct adb_request req;
+       DECLARE_COMPLETION(req_complete); 
+       
+       req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
+       if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
+                       (addr >> 8) & 0xff, addr & 0xff))
+               return 0xff;
+       if (system_state == SYSTEM_RUNNING)
+               wait_for_completion(&req_complete);
+       while (!req.complete)
+               pmu_poll();
+       return req.reply[0];
+}
+
+/*
+ * Write an NVRAM byte through the PMU; same wait/poll strategy as
+ * pmu_nvram_read_byte().  Failure to queue the request is silent.
+ */
+static void pmu_nvram_write_byte(int addr, unsigned char val)
+{
+       struct adb_request req;
+       DECLARE_COMPLETION(req_complete); 
+       
+       req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
+       if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
+                       (addr >> 8) & 0xff, addr & 0xff, val))
+               return;
+       if (system_state == SYSTEM_RUNNING)
+               wait_for_completion(&req_complete);
+       while (!req.complete)
+               pmu_poll();
+}
+
+#endif /* CONFIG_ADB_PMU */
+#endif /* CONFIG_PPC32 */
+
+static u8 chrp_checksum(struct chrp_header* hdr)
 {
        u8 *ptr;
        u16 sum = hdr->signature;
@@ -139,7 +241,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
        return sum;
 }
 
-static u32 __pmac core99_calc_adler(u8 *buffer)
+static u32 core99_calc_adler(u8 *buffer)
 {
        int cnt;
        u32 low, high;
@@ -161,7 +263,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
        return (high << 16) | low;
 }
 
-static u32 __pmac core99_check(u8* datas)
+static u32 core99_check(u8* datas)
 {
        struct core99_header* hdr99 = (struct core99_header*)datas;
 
@@ -180,7 +282,7 @@ static u32 __pmac core99_check(u8* datas)
        return hdr99->generation;
 }
 
-static int __pmac sm_erase_bank(int bank)
+static int sm_erase_bank(int bank)
 {
        int stat, i;
        unsigned long timeout;
@@ -194,7 +296,7 @@ static int __pmac sm_erase_bank(int bank)
        timeout = 0;
        do {
                if (++timeout > 1000000) {
-                       printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n");
+                       printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
                        break;
                }
                out_8(base, SM_FLASH_CMD_READ_STATUS);
@@ -212,7 +314,7 @@ static int __pmac sm_erase_bank(int bank)
        return 0;
 }
 
-static int __pmac sm_write_bank(int bank, u8* datas)
+static int sm_write_bank(int bank, u8* datas)
 {
        int i, stat = 0;
        unsigned long timeout;
@@ -247,7 +349,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
        return 0;
 }
 
-static int __pmac amd_erase_bank(int bank)
+static int amd_erase_bank(int bank)
 {
        int i, stat = 0;
        unsigned long timeout;
@@ -294,7 +396,7 @@ static int __pmac amd_erase_bank(int bank)
        return 0;
 }
 
-static int __pmac amd_write_bank(int bank, u8* datas)
+static int amd_write_bank(int bank, u8* datas)
 {
        int i, stat = 0;
        unsigned long timeout;
@@ -340,12 +442,49 @@ static int __pmac amd_write_bank(int bank, u8* datas)
        return 0;
 }
 
+/*
+ * Locate the OF / XPRAM / NR partitions.  On NewWorld machines, walk
+ * the CHRP partition headers (16 bytes each, read one byte at a time
+ * through ppc_md.nvram_read_val) matching by name; on OldWorld, the
+ * offsets are fixed.  A partition not found stays at -1.
+ * NOTE(review): the walk advances by hdr->len * 0x10 -- a corrupted
+ * header with len == 0 would loop forever; confirm this is acceptable
+ * for trusted NVRAM contents.
+ */
+static void __init lookup_partitions(void)
+{
+       u8 buffer[17];
+       int i, offset;
+       struct chrp_header* hdr;
+
+       if (pmac_newworld) {
+               nvram_partitions[pmac_nvram_OF] = -1;
+               nvram_partitions[pmac_nvram_XPRAM] = -1;
+               nvram_partitions[pmac_nvram_NR] = -1;
+               hdr = (struct chrp_header *)buffer;
+
+               offset = 0;
+               buffer[16] = 0;
+               do {
+                       for (i=0;i<16;i++)
+                               buffer[i] = ppc_md.nvram_read_val(offset+i);
+                       if (!strcmp(hdr->name, "common"))
+                               nvram_partitions[pmac_nvram_OF] = offset + 0x10;
+                       if (!strcmp(hdr->name, "APL,MacOS75")) {
+                               nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
+                               nvram_partitions[pmac_nvram_NR] = offset + 0x110;
+                       }
+                       offset += (hdr->len * 0x10);
+               } while(offset < NVRAM_SIZE);
+       } else {
+               nvram_partitions[pmac_nvram_OF] = 0x1800;
+               nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
+               nvram_partitions[pmac_nvram_NR] = 0x1400;
+       }
+       DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
+       DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
+       DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
+}
 
-static int __pmac core99_nvram_sync(void)
+static void core99_nvram_sync(void)
 {
        struct core99_header* hdr99;
        unsigned long flags;
 
+       if (!is_core_99 || !nvram_data || !nvram_image)
+               return;
+
        spin_lock_irqsave(&nv_lock, flags);
        if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
                NVRAM_SIZE))
@@ -370,32 +509,28 @@ static int __pmac core99_nvram_sync(void)
  bail:
        spin_unlock_irqrestore(&nv_lock, flags);
 
-       return 0;
+#ifdef DEBUG
+               mdelay(2000);
+#endif
 }
 
-int __init pmac_nvram_init(void)
+static int __init core99_nvram_setup(struct device_node *dp)
 {
-       struct device_node *dp;
-       u32 gen_bank0, gen_bank1;
        int i;
+       u32 gen_bank0, gen_bank1;
 
-       dp = find_devices("nvram");
-       if (dp == NULL) {
-               printk(KERN_ERR "Can't find NVRAM device\n");
-               return -ENODEV;
-       }
-       if (!device_is_compatible(dp, "nvram,flash")) {
-               printk(KERN_ERR "Incompatible type of NVRAM\n");
-               return -ENXIO;
+       if (nvram_naddrs < 1) {
+               printk(KERN_ERR "nvram: no address\n");
+               return -EINVAL;
        }
-
        nvram_image = alloc_bootmem(NVRAM_SIZE);
        if (nvram_image == NULL) {
                printk(KERN_ERR "nvram: can't allocate ram image\n");
                return -ENOMEM;
        }
        nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
-       
+       nvram_naddrs = 1; /* Make sure we get the correct case */
+
        DBG("nvram: Checking bank 0...\n");
 
        gen_bank0 = core99_check((u8 *)nvram_data);
@@ -408,11 +543,12 @@ int __init pmac_nvram_init(void)
        for (i=0; i<NVRAM_SIZE; i++)
                nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
 
+       ppc_md.nvram_read_val   = core99_nvram_read_byte;
+       ppc_md.nvram_write_val  = core99_nvram_write_byte;
        ppc_md.nvram_read       = core99_nvram_read;
        ppc_md.nvram_write      = core99_nvram_write;
        ppc_md.nvram_size       = core99_nvram_size;
        ppc_md.nvram_sync       = core99_nvram_sync;
-       
        /* 
         * Maybe we could be smarter here though making an exclusive list
         * of known flash chips is a bit nasty as older OF didn't provide us
@@ -427,67 +563,81 @@ int __init pmac_nvram_init(void)
                core99_erase_bank = sm_erase_bank;
                core99_write_bank = sm_write_bank;
        }
-
        return 0;
 }
 
-int __pmac pmac_get_partition(int partition)
+int __init pmac_nvram_init(void)
 {
-       struct nvram_partition *part;
-       const char *name;
-       int sig;
-
-       switch(partition) {
-       case pmac_nvram_OF:
-               name = "common";
-               sig = NVRAM_SIG_SYS;
-               break;
-       case pmac_nvram_XPRAM:
-               name = "APL,MacOS75";
-               sig = NVRAM_SIG_OS;
-               break;
-       case pmac_nvram_NR:
-       default:
-               /* Oldworld stuff */
+       struct device_node *dp;
+       int err = 0;
+
+       nvram_naddrs = 0;
+
+       dp = find_devices("nvram");
+       if (dp == NULL) {
+               printk(KERN_ERR "Can't find NVRAM device\n");
                return -ENODEV;
        }
+       nvram_naddrs = dp->n_addrs;
+       is_core_99 = device_is_compatible(dp, "nvram,flash");
+       if (is_core_99)
+               err = core99_nvram_setup(dp);
+#ifdef CONFIG_PPC32
+       else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
+               nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
+                                    dp->addrs[0].size);
+               nvram_mult = 1;
+               ppc_md.nvram_read_val   = direct_nvram_read_byte;
+               ppc_md.nvram_write_val  = direct_nvram_write_byte;
+       } else if (nvram_naddrs == 1) {
+               nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+               nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
+               ppc_md.nvram_read_val   = direct_nvram_read_byte;
+               ppc_md.nvram_write_val  = direct_nvram_write_byte;
+       } else if (nvram_naddrs == 2) {
+               nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+               nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
+               ppc_md.nvram_read_val   = indirect_nvram_read_byte;
+               ppc_md.nvram_write_val  = indirect_nvram_write_byte;
+       } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
+#ifdef CONFIG_ADB_PMU
+               nvram_naddrs = -1;
+               ppc_md.nvram_read_val   = pmu_nvram_read_byte;
+               ppc_md.nvram_write_val  = pmu_nvram_write_byte;
+#endif /* CONFIG_ADB_PMU */
+       }
+#endif
+       else {
+               printk(KERN_ERR "Incompatible type of NVRAM\n");
+               return -ENXIO;
+       }
+       lookup_partitions();
+       return err;
+}
 
-       part = nvram_find_partition(sig, name);
-       if (part == NULL)
-               return 0;
-
-       return part->index;
+/* Return the cached offset of an NVRAM partition (-1 if not found). */
+int pmac_get_partition(int partition)
+{
+       return nvram_partitions[partition];
+}
 
-u8 __pmac pmac_xpram_read(int xpaddr)
+u8 pmac_xpram_read(int xpaddr)
 {
        int offset = pmac_get_partition(pmac_nvram_XPRAM);
-       loff_t index;
-       u8 buf;
-       ssize_t count;
 
        if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
                return 0xff;
-       index = offset + xpaddr;
 
-       count = ppc_md.nvram_read(&buf, 1, &index);
-       if (count != 1)
-               return 0xff;
-       return buf;
+       return ppc_md.nvram_read_val(xpaddr + offset);
 }
 
-void __pmac pmac_xpram_write(int xpaddr, u8 data)
+void pmac_xpram_write(int xpaddr, u8 data)
 {
        int offset = pmac_get_partition(pmac_nvram_XPRAM);
-       loff_t index;
-       u8 buf;
 
        if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
                return;
-       index = offset + xpaddr;
-       buf = data;
 
-       ppc_md.nvram_write(&buf, 1, &index);
+       ppc_md.nvram_write_val(xpaddr + offset, data);
 }
 
 EXPORT_SYMBOL(pmac_get_partition);
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
new file mode 100644 (file)
index 0000000..ebe22a2
--- /dev/null
@@ -0,0 +1,1213 @@
+/*
+ * Support for PCI bridges found on Power Macintoshes.
+ *
+ * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
+ * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#ifdef CONFIG_PPC64
+#include <asm/iommu.h>
+#include <asm/ppc-pci.h>
+#endif
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+static int add_bridge(struct device_node *dev);
+
+/* XXX Could be per-controller, but I don't think we risk anything by
+ * assuming we won't have both UniNorth and Bandit */
+static int has_uninorth;
+#ifdef CONFIG_PPC64
+static struct pci_controller *u3_agp;
+static struct pci_controller *u3_ht;
+#endif /* CONFIG_PPC64 */
+
+extern u8 pci_cache_line_size;
+extern int pcibios_assign_bus_offset;
+
+struct device_node *k2_skiplist[2];
+
+/*
+ * Magic constants for enabling cache coherency in the bandit/PSX bridge.
+ */
+#define BANDIT_DEVID_2 8
+#define BANDIT_REVID   3
+
+#define BANDIT_DEVNUM  11
+#define BANDIT_MAGIC   0x50
+#define BANDIT_COHERENT        0x40
+
+/*
+ * Recursively compute the highest OF bus number reachable below 'node'
+ * (siblings and, for PCI/CardBus bridges, their children).  'higher' is
+ * the running maximum; the updated maximum is returned.
+ */
+static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
+{
+       for (; node != 0;node = node->sibling) {
+               int * bus_range;
+               unsigned int *class_code;
+               int len;
+
+               /* For PCI<->PCI bridges or CardBus bridges, we go down */
+               class_code = (unsigned int *) get_property(node, "class-code", NULL);
+               if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
+                       (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
+                       continue;
+               bus_range = (int *) get_property(node, "bus-range", &len);
+               if (bus_range != NULL && len > 2 * sizeof(int)) {
+                       if (bus_range[1] > higher)
+                               higher = bus_range[1];
+               }
+               higher = fixup_one_level_bus_range(node->child, higher);
+       }
+       return higher;
+}
+
+/* This routine fixes the "bus-range" property of all bridges in the
+ * system since they tend to have their "last" member wrong on macs
+ *
+ * Note that the bus numbers manipulated here are OF bus numbers, they
+ * are not Linux bus numbers.
+ */
+/* This routine fixes the "bus-range" property of all bridges in the
+ * system since they tend to have their "last" member wrong on macs
+ *
+ * Note that the bus numbers manipulated here are OF bus numbers, they
+ * are not Linux bus numbers.
+ */
+static void __init fixup_bus_range(struct device_node *bridge)
+{
+       int * bus_range;
+       int len;
+
+       /* Lookup the "bus-range" property for the hose */
+       bus_range = (int *) get_property(bridge, "bus-range", &len);
+       if (bus_range == NULL || len < 2 * sizeof(int)) {
+               printk(KERN_WARNING "Can't get bus-range for %s\n",
+                              bridge->full_name);
+               return;
+       }
+       /* Patch the upper bound in place in the OF property data */
+       bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
+}
+
+/*
+ * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
+ *
+ * The "Bandit" version is present in all early PCI PowerMacs,
+ * and up to the first ones using Grackle. Some machines may
+ * have 2 bandit controllers (2 PCI busses).
+ *
+ * "Chaos" is used in some "Bandit"-type machines as a bridge
+ * for the separate display bus. It is accessed the same
+ * way as bandit, but cannot be probed for devices. It therefore
+ * has its own config access functions.
+ *
+ * The "UniNorth" version is present in all Core99 machines
+ * (iBook, G4, new IMacs, and all the recent Apple machines).
+ * It contains 3 controllers in one ASIC.
+ *
+ * The U3 is the bridge used on G5 machines. It contains an
+ * AGP bus which is dealt with the old UniNorth access routines
+ * and a HyperTransport bus which uses its own set of access
+ * functions.
+ */
+
+/* Config-address encodings for MacRISC bridges: CFA0 = type-0 cycle
+ * (device select by slot bit + function + register), CFA1 = type-1
+ * cycle (bus/devfn/register, low bit set).
+ * NOTE(review): MACRISC_CFA0's parameter is named 'devfn' but its body
+ * expands 'dev_fn' -- it only works because the caller's variable is
+ * literally named dev_fn.  Worth fixing for hygiene.
+ */
+#define MACRISC_CFA0(devfn, off)       \
+       ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
+       | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
+       | (((unsigned long)(off)) & 0xFCUL))
+
+#define MACRISC_CFA1(bus, devfn, off)  \
+       ((((unsigned long)(bus)) << 16) \
+       |(((unsigned long)(devfn)) << 8) \
+       |(((unsigned long)(off)) & 0xFCUL) \
+       |1UL)
+
+/*
+ * Program the bridge's config-address register for (bus, dev_fn, offset)
+ * and return the virtual address inside cfg_data to access, or 0 for
+ * devices that must not be probed (devfn below slot 11 on the root bus).
+ */
+static unsigned long macrisc_cfg_access(struct pci_controller* hose,
+                                              u8 bus, u8 dev_fn, u8 offset)
+{
+       unsigned int caddr;
+
+       if (bus == hose->first_busno) {
+               if (dev_fn < (11 << 3))
+                       return 0;
+               caddr = MACRISC_CFA0(dev_fn, offset);
+       } else
+               caddr = MACRISC_CFA1(bus, dev_fn, offset);
+
+       /* Uninorth will return garbage if we don't read back the value ! */
+       do {
+               out_le32(hose->cfg_addr, caddr);
+       } while (in_le32(hose->cfg_addr) != caddr);
+
+       /* UniNorth decodes 8 bytes of cfg_data window, Bandit only 4 */
+       offset &= has_uninorth ? 0x07 : 0x03;
+       return ((unsigned long)hose->cfg_data) + offset;
+}
+
+/*
+ * pci_ops read hook for MacRISC bridges: resolve the config-space
+ * address, then do a width-appropriate little-endian MMIO read.
+ */
+static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
+                                     int offset, int len, u32 *val)
+{
+       struct pci_controller *hose;
+       unsigned long addr;
+
+       hose = pci_bus_to_host(bus);
+       if (hose == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
+       if (!addr)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       /*
+        * Note: the caller has already checked that offset is
+        * suitably aligned and that len is 1, 2 or 4.
+        */
+       switch (len) {
+       case 1:
+               *val = in_8((u8 *)addr);
+               break;
+       case 2:
+               *val = in_le16((u16 *)addr);
+               break;
+       default:
+               *val = in_le32((u32 *)addr);
+               break;
+       }
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * pci_ops write hook for MacRISC bridges.  Each write is followed by a
+ * read-back of the same location -- presumably to flush posted writes
+ * on these bridges (NOTE(review): confirm against bridge errata).
+ */
+static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
+                                      int offset, int len, u32 val)
+{
+       struct pci_controller *hose;
+       unsigned long addr;
+
+       hose = pci_bus_to_host(bus);
+       if (hose == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
+       if (!addr)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       /*
+        * Note: the caller has already checked that offset is
+        * suitably aligned and that len is 1, 2 or 4.
+        */
+       switch (len) {
+       case 1:
+               out_8((u8 *)addr, val);
+               (void) in_8((u8 *)addr);
+               break;
+       case 2:
+               out_le16((u16 *)addr, val);
+               (void) in_le16((u16 *)addr);
+               break;
+       default:
+               out_le32((u32 *)addr, val);
+               (void) in_le32((u32 *)addr);
+               break;
+       }
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors shared by Bandit/Chaos/UniNorth bridges */
+static struct pci_ops macrisc_pci_ops =
+{
+       macrisc_read_config,    /* read */
+       macrisc_write_config    /* write */
+};
+
+#ifdef CONFIG_PPC32
+/*
+ * Verify that a specific (bus, dev_fn) exists on chaos
+ */
+/*
+ * Check that (bus, devfn) is present in the OF device-tree and that the
+ * requested register may be accessed on the chaos bridge.  Returns
+ * PCIBIOS_BAD_REGISTER_NUMBER for blocked registers of device 106b:0003.
+ */
+static int
+chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
+{
+       struct device_node *np = pci_busdev_to_OF_node(bus, devfn);
+       u32 *vid, *did;
+
+       if (np == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       vid = (u32 *)get_property(np, "vendor-id", NULL);
+       if (vid == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       did = (u32 *)get_property(np, "device-id", NULL);
+       if (did == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       /* Registers 0x10..0x24 except 0x14/0x18 are off limits here */
+       if (*vid == 0x106b && *did == 3 && offset >= 0x10
+           && offset != 0x14 && offset != 0x18 && offset <= 0x24)
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config read on chaos: validate the target first, returning all-ones
+ * for registers that must not be touched. */
+static int
+chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+                 int len, u32 *val)
+{
+       int rc = chaos_validate_dev(bus, devfn, offset);
+
+       if (rc == PCIBIOS_BAD_REGISTER_NUMBER)
+               *val = ~0U;
+       if (rc != PCIBIOS_SUCCESSFUL)
+               return rc;
+       return macrisc_read_config(bus, devfn, offset, len, val);
+}
+
+/* Config write on chaos: refuse writes that fail validation, otherwise
+ * fall through to the common macrisc accessor. */
+static int
+chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
+                  int len, u32 val)
+{
+       int rc = chaos_validate_dev(bus, devfn, offset);
+
+       return (rc != PCIBIOS_SUCCESSFUL)
+               ? rc
+               : macrisc_write_config(bus, devfn, offset, len, val);
+}
+
+/* Config-space accessors for the chaos bridge (validating wrappers) */
+static struct pci_ops chaos_pci_ops =
+{
+       chaos_read_config,      /* read */
+       chaos_write_config      /* write */
+};
+
+/*
+ * Wire up a chaos host bridge: install its ops and map the
+ * config-address and config-data registers (at +0x800000 and +0xc00000
+ * from the bridge base, same layout as the other macrisc bridges).
+ */
+static void __init setup_chaos(struct pci_controller *hose,
+                              struct reg_property *addr)
+{
+       /* assume a `chaos' bridge */
+       hose->ops = &chaos_pci_ops;
+       hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+       hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+}
+#else
+#define setup_chaos(hose, addr)
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+/*
+ * These versions of U3 HyperTransport config space access ops do not
+ * implement self-view of the HT host yet
+ */
+
+/*
+ * This function deals with some "special cases" devices.
+ *
+ *  0 -> No special case
+ *  1 -> Skip the device but act as if the access was successfull
+ *       (return 0xff's on reads, eventually, cache config space
+ *       accesses in a later version)
+ * -1 -> Hide the device (unsuccessful acess)
+ */
+/*
+ * Decide how to treat a config access to (bus, devfn) under the U3 HT
+ * host.  Returns 0 to allow the access, 1 to fake success (device is on
+ * the k2_skiplist, i.e. powered down), -1 to reject it outright because
+ * the device is not in the OF device-tree.
+ */
+static int u3_ht_skip_device(struct pci_controller *hose,
+                            struct pci_bus *bus, unsigned int devfn)
+{
+       struct device_node *busdn, *dn;
+       int i;
+
+       /* We only allow config cycles to devices that are in OF device-tree
+        * as we are apparently having some weird things going on with some
+        * revs of K2 on recent G5s
+        */
+       if (bus->self)
+               busdn = pci_device_to_OF_node(bus->self);
+       else
+               busdn = hose->arch_data;
+       for (dn = busdn->child; dn; dn = dn->sibling)
+               if (dn->data && PCI_DN(dn)->devfn == devfn)
+                       break;
+       if (dn == NULL)
+               return -1;
+
+       /*
+        * When a device in K2 is powered down, we die on config
+        * cycle accesses. Fix that here.
+        */
+       for (i=0; i<2; i++)
+               if (k2_skiplist[i] == dn)
+                       return 1;
+
+       return 0;
+}
+
+/*
+ * Config cycle addresses within the U3 HT mapped config window.
+ * Note: the original U3_HT_CFA0 expanded the name "offset" instead of
+ * its "off" argument — it only worked because every caller happened to
+ * pass a variable literally named "offset".  Arguments are now used and
+ * parenthesized properly; expansion is unchanged for existing callers.
+ */
+#define U3_HT_CFA0(devfn, off)         \
+               ((((unsigned long)(devfn)) << 8) | (off))
+#define U3_HT_CFA1(bus, devfn, off)    \
+               (U3_HT_CFA0(devfn, off) \
+               + (((unsigned long)(bus)) << 16) \
+               + 0x01000000UL)
+
+/*
+ * Compute the MMIO address for a U3 HT config access, or 0 when the
+ * access is refused (we do not self-probe the HT bridge in slot 0 of
+ * the root bus yet).
+ */
+static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
+                                            u8 bus, u8 devfn, u8 offset)
+{
+       if (bus == hose->first_busno) {
+               /* For now, we don't self probe U3 HT bridge */
+               if (PCI_SLOT(devfn) == 0)
+                       return 0;
+               return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
+       } else
+               return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
+}
+
+/*
+ * Config read through the U3 HT host.  Devices absent from the OF tree
+ * are rejected; powered-down K2 devices fake a successful all-ones read
+ * instead of issuing a config cycle that would hang the machine.
+ */
+static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
+                                   int offset, int len, u32 *val)
+{
+       struct pci_controller *hose = pci_bus_to_host(bus);
+       unsigned long data;
+
+       if (hose == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       data = u3_ht_cfg_access(hose, bus->number, devfn, offset);
+       if (data == 0)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       switch (u3_ht_skip_device(hose, bus, devfn)) {
+       case 1:
+               /* Skipped device: pretend success, all-ones of len width */
+               if (len == 1)
+                       *val = 0xff;
+               else if (len == 2)
+                       *val = 0xffff;
+               else
+                       *val = 0xfffffffful;
+               return PCIBIOS_SUCCESSFUL;
+       case 0:
+               break;
+       default:
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       }
+
+       /* Caller guarantees offset alignment and len in {1, 2, 4} */
+       if (len == 1)
+               *val = in_8((u8 *)data);
+       else if (len == 2)
+               *val = in_le16((u16 *)data);
+       else
+               *val = in_le32((u32 *)data);
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Config write through the U3 HT host.  Same gating as the read path:
+ * unknown devices are rejected, skiplisted (powered-down) devices fake
+ * success without touching the hardware.  Writes are read back to push
+ * them out, as in the macrisc accessors.
+ */
+static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
+                                    int offset, int len, u32 val)
+{
+       struct pci_controller *hose = pci_bus_to_host(bus);
+       unsigned long data;
+
+       if (hose == NULL)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       data = u3_ht_cfg_access(hose, bus->number, devfn, offset);
+       if (data == 0)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       switch (u3_ht_skip_device(hose, bus, devfn)) {
+       case 1:
+               return PCIBIOS_SUCCESSFUL;
+       case 0:
+               break;
+       default:
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       }
+
+       /* Caller guarantees offset alignment and len in {1, 2, 4} */
+       if (len == 1) {
+               out_8((u8 *)data, val);
+               (void) in_8((u8 *)data);
+       } else if (len == 2) {
+               out_le16((u16 *)data, val);
+               (void) in_le16((u16 *)data);
+       } else {
+               out_le32((u32 *)data, val);
+               (void) in_le32((u32 *)data);
+       }
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors for the U3 HyperTransport host bridge */
+static struct pci_ops u3_ht_pci_ops =
+{
+       u3_ht_read_config,      /* read */
+       u3_ht_write_config      /* write */
+};
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC32
+/*
+ * For a bandit bridge, turn on cache coherency if necessary.
+ * N.B. we could clean this up using the hose ops directly.
+ */
+static void __init init_bandit(struct pci_controller *bp)
+{
+       unsigned int vendev, magic;
+       int rev;
+
+       /* read the word at offset 0 in config space for device 11 */
+       out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
+       /* settle delay after programming cfg_addr — presumably required
+        * by the bridge; keep before every cfg_data access */
+       udelay(2);
+       vendev = in_le32(bp->cfg_data);
+       if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
+                       PCI_VENDOR_ID_APPLE) {
+               /* read the revision id */
+               out_le32(bp->cfg_addr,
+                        (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
+               udelay(2);
+               rev = in_8(bp->cfg_data);
+               if (rev != BANDIT_REVID)
+                       printk(KERN_WARNING
+                              "Unknown revision %d for bandit\n", rev);
+       } else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
+               printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
+               return;
+       }
+
+       /* read the word at offset 0x50 */
+       out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
+       udelay(2);
+       magic = in_le32(bp->cfg_data);
+       /* already coherent: nothing to do */
+       if ((magic & BANDIT_COHERENT) != 0)
+               return;
+       magic |= BANDIT_COHERENT;
+       udelay(2);
+       /* cfg_addr still points at BANDIT_MAGIC from the read above */
+       out_le32(bp->cfg_data, magic);
+       printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
+}
+
+/*
+ * Tweak the PCI-PCI bridge chip on the blue & white G3s.
+ */
+static void __init init_p2pbridge(void)
+{
+       struct device_node *p2pbridge;
+       struct pci_controller* hose;
+       u8 bus, devfn;
+       u16 val;
+
+       /* XXX it would be better here to identify the specific
+          PCI-PCI bridge chip we have. */
+       /* Only act on a "pci-bridge" node directly under a "pci" node */
+       if ((p2pbridge = find_devices("pci-bridge")) == 0
+           || p2pbridge->parent == NULL
+           || strcmp(p2pbridge->parent->name, "pci") != 0)
+               return;
+       if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
+               DBG("Can't find PCI infos for PCI<->PCI bridge\n");
+               return;
+       }
+       /* Warning: At this point, we have not yet renumbered all busses.
+        * So we must use OF walking to find out hose
+        */
+       hose = pci_find_hose_for_OF_device(p2pbridge);
+       if (!hose) {
+               DBG("Can't find hose for PCI<->PCI bridge\n");
+               return;
+       }
+       if (early_read_config_word(hose, bus, devfn,
+                                  PCI_BRIDGE_CONTROL, &val) < 0) {
+               printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
+               return;
+       }
+       /* Clear master-abort mode in the bridge control register */
+       val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
+       early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
+}
+
+/*
+ * Some Apple desktop machines have a NEC PD720100A USB2 controller
+ * on the motherboard. Open Firmware, on these, will disable the
+ * EHCI part of it so it behaves like a pair of OHCI's. This fixup
+ * code re-enables it ;)
+ */
+static void __init fixup_nec_usb2(void)
+{
+       struct device_node *nec;
+
+       /* Scan every "usb" node for NEC 1033:0035 controllers */
+       for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
+               struct pci_controller *hose;
+               u32 data, *prop;
+               u8 bus, devfn;
+
+               prop = (u32 *)get_property(nec, "vendor-id", NULL);
+               if (prop == NULL)
+                       continue;
+               if (0x1033 != *prop)
+                       continue;
+               prop = (u32 *)get_property(nec, "device-id", NULL);
+               if (prop == NULL)
+                       continue;
+               if (0x0035 != *prop)
+                       continue;
+               /* Extract bus/devfn from the first "reg" entry */
+               prop = (u32 *)get_property(nec, "reg", NULL);
+               if (prop == NULL)
+                       continue;
+               devfn = (prop[0] >> 8) & 0xff;
+               bus = (prop[0] >> 16) & 0xff;
+               /* Only act on function 0 of the chip */
+               if (PCI_FUNC(devfn) != 0)
+                       continue;
+               hose = pci_find_hose_for_OF_device(nec);
+               if (!hose)
+                       continue;
+               /* Bit 0 of config reg 0xe4 disables EHCI; clear it and
+                * route the interrupt line to function 2 — presumably the
+                * EHCI function of this chip */
+               early_read_config_dword(hose, bus, devfn, 0xe4, &data);
+               if (data & 1UL) {
+                       printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
+                       data &= ~1UL;
+                       early_write_config_dword(hose, bus, devfn, 0xe4, data);
+                       early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
+                               nec->intrs[0].line);
+               }
+       }
+}
+
+/* Config-address encoding for the Grackle (MPC106) host bridge */
+#define GRACKLE_CFA(b, d, o)   (0x80 | ((b) << 8) | ((d) << 16) \
+                                | (((o) & ~3) << 24))
+
+/* Bits in PICR1 (config reg 0xa8) */
+#define GRACKLE_PICR1_STG              0x00000040
+#define GRACKLE_PICR1_LOOPSNOOP                0x00000010
+
+/* N.B. this is called before bridges is initialized, so we can't
+   use grackle_pcibios_{read,write}_config_dword. */
+/* Set or clear the store-gathering (STG) bit in Grackle's PICR1.
+ * NOTE(review): cfg_addr is written big-endian, cfg_data accessed
+ * little-endian — confirm this matches the MPC106 register layout. */
+static inline void grackle_set_stg(struct pci_controller* bp, int enable)
+{
+       unsigned int val;
+
+       out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+       val = in_le32(bp->cfg_data);
+       val = enable? (val | GRACKLE_PICR1_STG) :
+               (val & ~GRACKLE_PICR1_STG);
+       out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+       out_le32(bp->cfg_data, val);
+       /* read back to push the write out */
+       (void)in_le32(bp->cfg_data);
+}
+
+/* Set or clear the loop-snoop bit in Grackle's PICR1 (config reg 0xa8);
+ * same read-modify-write sequence as grackle_set_stg above. */
+static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
+{
+       unsigned int val;
+
+       out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+       val = in_le32(bp->cfg_data);
+       val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) :
+               (val & ~GRACKLE_PICR1_LOOPSNOOP);
+       out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+       out_le32(bp->cfg_data, val);
+       /* read back to push the write out */
+       (void)in_le32(bp->cfg_data);
+}
+
+/*
+ * Wire up a Grackle (MPC106) host bridge via the generic indirect
+ * config mechanism, enabling loop-snoop on 1998 PowerBooks.
+ */
+void __init setup_grackle(struct pci_controller *hose)
+{
+       setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+       if (machine_is_compatible("AAPL,PowerBook1998"))
+               grackle_set_loop_snoop(hose, 1);
+#if 0  /* Disabled for now, HW problems ??? */
+       grackle_set_stg(hose, 1);
+#endif
+}
+
+/*
+ * Wire up a Bandit host bridge: macrisc config ops, config registers at
+ * the usual +0x800000/+0xc00000 offsets, then enable cache coherency.
+ */
+static void __init setup_bandit(struct pci_controller *hose,
+                               struct reg_property *addr)
+{
+       hose->ops = &macrisc_pci_ops;
+       hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+       hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+       init_bandit(hose);
+}
+
+/*
+ * Wire up a UniNorth host bridge.  Returns non-zero when this bridge is
+ * the primary one (the instance at 0xf2000000, which carries the PCI
+ * slots).
+ */
+static int __init setup_uninorth(struct pci_controller *hose,
+                                struct reg_property *addr)
+{
+       pci_assign_all_buses = 1;
+       has_uninorth = 1;
+       hose->ops = &macrisc_pci_ops;
+       hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+       hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+       /* We "know" that the bridge at f2000000 has the PCI slots. */
+       return addr->address == 0xf2000000;
+}
+#endif
+
+#ifdef CONFIG_PPC64
+/*
+ * Wire up the U3 AGP bridge on G5s.  Buses 0xf0-0xff are reserved for
+ * AGP; the global u3_agp pointer is recorded for later fixups.
+ */
+static void __init setup_u3_agp(struct pci_controller* hose)
+{
+       /* On G5, we move AGP up to high bus number so we don't need
+        * to reassign bus numbers for HT. If we ever have P2P bridges
+        * on AGP, we'll have to move pci_assign_all_busses to the
+        * pci_controller structure so we enable it for AGP and not for
+        * HT childs.
+        * We hard code the address because of the different size of
+        * the reg address cell, we shall fix that by killing struct
+        * reg_property and using some accessor functions instead
+        */
+       hose->first_busno = 0xf0;
+       hose->last_busno = 0xff;
+       has_uninorth = 1;
+       hose->ops = &macrisc_pci_ops;
+       hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
+       hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
+
+       u3_agp = hose;
+}
+
+/*
+ * Wire up the U3 HyperTransport host bridge and carve its memory
+ * resources around the ranges already claimed by the AGP bridge.
+ * Must run after setup_u3_agp so u3_agp's resources are known.
+ */
+static void __init setup_u3_ht(struct pci_controller* hose)
+{
+       struct device_node *np = (struct device_node *)hose->arch_data;
+       int i, cur;
+
+       hose->ops = &u3_ht_pci_ops;
+
+       /* We hard code the address because of the different size of
+        * the reg address cell, we shall fix that by killing struct
+        * reg_property and using some accessor functions instead
+        */
+       hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
+
+       /*
+        * /ht node doesn't expose a "ranges" property, so we "remove" regions that
+        * have been allocated to AGP. So far, this version of the code doesn't assign
+        * any of the 0xfxxxxxxx "fine" memory regions to /ht.
+        * We need to fix that sooner or later by either parsing all child "ranges"
+        * properties or figuring out the U3 address space decoding logic and
+        * then read its configuration register (if any).
+        */
+       hose->io_base_phys = 0xf4000000;
+       hose->pci_io_size = 0x00400000;
+       hose->io_resource.name = np->full_name;
+       hose->io_resource.start = 0;
+       hose->io_resource.end = 0x003fffff;
+       hose->io_resource.flags = IORESOURCE_IO;
+       hose->pci_mem_offset = 0;
+       hose->first_busno = 0;
+       hose->last_busno = 0xef;
+       hose->mem_resources[0].name = np->full_name;
+       hose->mem_resources[0].start = 0x80000000;
+       hose->mem_resources[0].end = 0xefffffff;
+       hose->mem_resources[0].flags = IORESOURCE_MEM;
+
+       u3_ht = hose;
+
+       if (u3_agp == NULL) {
+               DBG("U3 has no AGP, using full resource range\n");
+               return;
+       }
+
+       /* We "remove" the AGP resources from the resources allocated to HT, that
+        * is we create "holes". However, that code does assumptions that so far
+        * happen to be true (cross fingers...), typically that resources in the
+        * AGP node are properly ordered
+        */
+       cur = 0;
+       for (i=0; i<3; i++) {
+               struct resource *res = &u3_agp->mem_resources[i];
+               /* NOTE(review): exact equality test — a resource with any
+                * extra flag bits set is skipped; confirm intended */
+               if (res->flags != IORESOURCE_MEM)
+                       continue;
+               /* We don't care about "fine" resources */
+               if (res->start >= 0xf0000000)
+                       continue;
+               /* Check if it's just a matter of "shrinking" us in one direction */
+               if (hose->mem_resources[cur].start == res->start) {
+                       DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
+                           cur, hose->mem_resources[cur].start, res->end + 1);
+                       hose->mem_resources[cur].start = res->end + 1;
+                       continue;
+               }
+               if (hose->mem_resources[cur].end == res->end) {
+                       DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
+                           cur, hose->mem_resources[cur].end, res->start - 1);
+                       hose->mem_resources[cur].end = res->start - 1;
+                       continue;
+               }
+               /* No, it's not the case, we need a hole */
+               if (cur == 2) {
+                       /* not enough resources for a hole, we drop part of the range */
+                       printk(KERN_WARNING "Running out of resources for /ht host !\n");
+                       hose->mem_resources[cur].end = res->start - 1;
+                       continue;
+               }
+               /* Split the current resource in two around the AGP range */
+               cur++;
+               DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
+                   cur-1, res->start - 1, cur, res->end + 1);
+               hose->mem_resources[cur].name = np->full_name;
+               hose->mem_resources[cur].flags = IORESOURCE_MEM;
+               hose->mem_resources[cur].start = res->end + 1;
+               hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
+               hose->mem_resources[cur-1].end = res->start - 1;
+       }
+}
+
+/* XXX this needs to be converged between ppc32 and ppc64... */
+/* Allocate and initialize a pci_controller from bootmem; returns NULL
+ * if the allocation fails. */
+static struct pci_controller * __init pcibios_alloc_controller(void)
+{
+       struct pci_controller *hose = alloc_bootmem(sizeof(*hose));
+
+       if (hose != NULL)
+               pci_setup_pci_controller(hose);
+       return hose;
+}
+#endif
+
+/*
+ * We assume that if we have a G3 powermac, we have one bridge called
+ * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
+ * if we have one or more bandit or chaos bridges, we don't have a MPC106.
+ */
+/*
+ * Create and configure a pci_controller for the given host-bridge OF
+ * node, dispatching to the bridge-specific setup routine.  Returns 0 on
+ * success, -ENODEV/-ENOMEM on failure.
+ */
+static int __init add_bridge(struct device_node *dev)
+{
+       int len;
+       struct pci_controller *hose;
+#ifdef CONFIG_PPC32
+       struct reg_property *addr;
+#endif
+       char *disp_name;
+       int *bus_range;
+       int primary = 1;
+
+       DBG("Adding PCI host bridge %s\n", dev->full_name);
+
+#ifdef CONFIG_PPC32
+       /* XXX fix this */
+       /* ppc32 bridges need a "reg" property for their base address */
+       addr = (struct reg_property *) get_property(dev, "reg", &len);
+       if (addr == NULL || len < sizeof(*addr)) {
+               printk(KERN_WARNING "Can't use %s: no address\n",
+                      dev->full_name);
+               return -ENODEV;
+       }
+#endif
+       /* "bus-range" is optional; fall back to 0..0xff below */
+       bus_range = (int *) get_property(dev, "bus-range", &len);
+       if (bus_range == NULL || len < 2 * sizeof(int)) {
+               printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
+                              dev->full_name);
+       }
+
+       hose = pcibios_alloc_controller();
+       if (!hose)
+               return -ENOMEM;
+       hose->arch_data = dev;
+       hose->first_busno = bus_range ? bus_range[0] : 0;
+       hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+       disp_name = NULL;
+#ifdef CONFIG_POWER4
+       if (device_is_compatible(dev, "u3-agp")) {
+               setup_u3_agp(hose);
+               disp_name = "U3-AGP";
+               primary = 0;
+       } else if (device_is_compatible(dev, "u3-ht")) {
+               setup_u3_ht(hose);
+               disp_name = "U3-HT";
+               primary = 1;
+       }
+       /* NOTE(review): disp_name stays NULL if the node matched neither
+        * compatible string — confirm callers only pass known bridges */
+       printk(KERN_INFO "Found %s PCI host bridge.  Firmware bus number: %d->%d\n",
+               disp_name, hose->first_busno, hose->last_busno);
+#else
+       if (device_is_compatible(dev, "uni-north")) {
+               primary = setup_uninorth(hose, addr);
+               disp_name = "UniNorth";
+       } else if (strcmp(dev->name, "pci") == 0) {
+               /* XXX assume this is a mpc106 (grackle) */
+               setup_grackle(hose);
+               disp_name = "Grackle (MPC106)";
+       } else if (strcmp(dev->name, "bandit") == 0) {
+               setup_bandit(hose, addr);
+               disp_name = "Bandit";
+       } else if (strcmp(dev->name, "chaos") == 0) {
+               setup_chaos(hose, addr);
+               disp_name = "Chaos";
+               primary = 0;
+       }
+       printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
+               disp_name, addr->address, hose->first_busno, hose->last_busno);
+#endif
+       DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+               hose, hose->cfg_addr, hose->cfg_data);
+
+       /* Interpret the "ranges" property */
+       /* This also maps the I/O region and sets isa_io/mem_base */
+       pci_process_bridge_OF_ranges(hose, dev, primary);
+
+       /* Fixup "bus-range" OF property */
+       fixup_bus_range(dev);
+
+       return 0;
+}
+
+/*
+ * Copy interrupt lines from the OF device-tree into each PCI device's
+ * PCI_INTERRUPT_LINE config register.
+ */
+static void __init
+pcibios_fixup_OF_interrupts(void)
+{
+       struct pci_dev* dev = NULL;
+
+       /*
+        * Open Firmware often doesn't initialize the
+        * PCI_INTERRUPT_LINE config register properly, so we
+        * should find the device node and apply the interrupt
+        * obtained from the OF device-tree
+        */
+       for_each_pci_dev(dev) {
+               struct device_node *node;
+               node = pci_device_to_OF_node(dev);
+               /* this is the node, see if it has interrupts */
+               if (node && node->n_intrs > 0)
+                       dev->irq = node->intrs[0].line;
+               /* NOTE(review): the config write happens even when no OF
+                * node/interrupt was found, re-writing the existing
+                * dev->irq — confirm intended */
+               pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+       }
+}
+
+/* Platform pcibios fixup hook: currently only syncs interrupt lines
+ * with the OF device-tree. */
+void __init
+pmac_pcibios_fixup(void)
+{
+       /* Fixup interrupts according to OF tree */
+       pcibios_fixup_OF_interrupts();
+}
+
+#ifdef CONFIG_PPC64
+/* Walk all host bridges and log their I/O resource windows.  Despite
+ * the name, this version only prints — presumably the real resource
+ * fixup happens in common code. */
+static void __init pmac_fixup_phb_resources(void)
+{
+       struct pci_controller *phb, *next;
+
+       list_for_each_entry_safe(phb, next, &hose_list, list_node) {
+               printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
+                      phb->global_number,
+                      phb->io_resource.start, phb->io_resource.end);
+       }
+}
+#endif
+
+/*
+ * Top-level PowerMac PCI initialization: walk the root of the OF tree,
+ * create a host bridge for every bandit/chaos/pci node, then (on ppc64)
+ * probe the HT bridge last and perform G5-specific fixups.
+ */
+void __init pmac_pci_init(void)
+{
+       struct device_node *np, *root;
+       struct device_node *ht = NULL;
+
+       root = of_find_node_by_path("/");
+       if (root == NULL) {
+               printk(KERN_CRIT "pmac_pci_init: can't find root "
+                      "of device tree\n");
+               return;
+       }
+       for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
+               if (np->name == NULL)
+                       continue;
+               if (strcmp(np->name, "bandit") == 0
+                   || strcmp(np->name, "chaos") == 0
+                   || strcmp(np->name, "pci") == 0) {
+                       /* Keep a reference on nodes we successfully used */
+                       if (add_bridge(np) == 0)
+                               of_node_get(np);
+               }
+               if (strcmp(np->name, "ht") == 0) {
+                       of_node_get(np);
+                       ht = np;
+               }
+       }
+       of_node_put(root);
+
+#ifdef CONFIG_PPC64
+       /* Probe HT last as it relies on the agp resources to be already
+        * setup
+        */
+       if (ht && add_bridge(ht) != 0)
+               of_node_put(ht);
+
+       /*
+        * We need to call pci_setup_phb_io for the HT bridge first
+        * so it gets the I/O port numbers starting at 0, and we
+        * need to call it for the AGP bridge after that so it gets
+        * small positive I/O port numbers.
+        */
+       if (u3_ht)
+               pci_setup_phb_io(u3_ht, 1);
+       if (u3_agp)
+               pci_setup_phb_io(u3_agp, 0);
+
+       /*
+        * On ppc64, fixup the IO resources on our host bridges as
+        * the common code does it only for children of the host bridges
+        */
+       pmac_fixup_phb_resources();
+
+       /* Setup the linkage between OF nodes and PHBs */
+       pci_devs_phb_init();
+
+       /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+        * assume there is no P2P bridge on the AGP bus, which should be a
+        * safe assumptions hopefully.
+        */
+       if (u3_agp) {
+               struct device_node *np = u3_agp->arch_data;
+               PCI_DN(np)->busno = 0xf0;
+               for (np = np->child; np; np = np->sibling)
+                       PCI_DN(np)->busno = 0xf0;
+       }
+
+       /* map in PCI I/O space */
+       phbs_remap_io();
+
+       /* pmac_check_ht_link(); */
+
+       /* Tell pci.c to not use the common resource allocation mechanism */
+       pci_probe_only = 1;
+
+       /* Allow all IO */
+       io_page_mask = -1;
+
+#else /* CONFIG_PPC64 */
+       init_p2pbridge();
+       fixup_nec_usb2();
+
+       /* We are still having some issues with the Xserve G4, enabling
+        * some offset between bus number and domains for now when we
+        * assign all busses should help for now
+        */
+       if (pci_assign_all_buses)
+               pcibios_assign_bus_offset = 0x10;
+#endif
+}
+
+/*
+ * Platform hook called when a driver enables a PCI device.  Rejects
+ * phantom Apple USB controllers absent from the OF tree, powers up
+ * FireWire/GMAC cells under uni-north, and re-applies basic PCI config
+ * for devices that were powered down during probe.  Returns 0 to allow
+ * the enable, -EINVAL to refuse it.
+ */
+int
+pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
+{
+       struct device_node* node;
+       int updatecfg = 0;
+       int uninorth_child;
+
+       node = pci_device_to_OF_node(dev);
+
+       /* We don't want to enable USB controllers absent from the OF tree
+        * (iBook second controller)
+        */
+       if (dev->vendor == PCI_VENDOR_ID_APPLE
+           && (dev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10))
+           && !node) {
+               printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
+                      pci_name(dev));
+               return -EINVAL;
+       }
+
+       if (!node)
+               return 0;
+
+       uninorth_child = node->parent &&
+               device_is_compatible(node->parent, "uni-north");
+
+       /* Firewire & GMAC were disabled after PCI probe, the driver is
+        * claiming them, we must re-enable them now.
+        */
+       if (uninorth_child && !strcmp(node->name, "firewire") &&
+           (device_is_compatible(node, "pci106b,18") ||
+            device_is_compatible(node, "pci106b,30") ||
+            device_is_compatible(node, "pci11c1,5811"))) {
+               pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
+               pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
+               updatecfg = 1;
+       }
+       if (uninorth_child && !strcmp(node->name, "ethernet") &&
+           device_is_compatible(node, "gmac")) {
+               pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
+               updatecfg = 1;
+       }
+
+       if (updatecfg) {
+               u16 cmd;
+
+               /*
+                * Make sure PCI is correctly configured
+                *
+                * We use old pci_bios versions of the function since, by
+                * default, gmac is not powered up, and so will be absent
+                * from the kernel initial PCI lookup.
+                *
+                * Should be replaced by 2.4 new PCI mechanisms and really
+                * register the device.
+                */
+               pci_read_config_word(dev, PCI_COMMAND, &cmd);
+               cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
+                       | PCI_COMMAND_INVALIDATE;
+               pci_write_config_word(dev, PCI_COMMAND, cmd);
+               pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
+               /* cache line size register is in units of dwords */
+               pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+                                     L1_CACHE_BYTES >> 2);
+       }
+
+       return 0;
+}
+
+/* We power down some devices after they have been probed. They'll
+ * be powered back on later on
+ */
+/*
+ * Post-init pass: enable storage-class devices OF left disabled, then
+ * power down FireWire and GMAC cells under uni-north (they are powered
+ * back up by pmac_pci_enable_device_hook when a driver claims them).
+ */
+void __init pmac_pcibios_after_init(void)
+{
+       struct device_node* nd;
+
+#ifdef CONFIG_BLK_DEV_IDE
+       struct pci_dev *dev = NULL;
+
+       /* OF fails to initialize IDE controllers on macs
+        * (and maybe other machines)
+        *
+        * Ideally, this should be moved to the IDE layer, but we need
+        * to check specifically with Andre Hedrick how to do it cleanly
+        * since the common IDE code seem to care about the fact that the
+        * BIOS may have disabled a controller.
+        *
+        * -- BenH
+        */
+       for_each_pci_dev(dev) {
+               if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
+                       pci_enable_device(dev);
+       }
+#endif /* CONFIG_BLK_DEV_IDE */
+
+       /* Power down supported FireWire cells under uni-north */
+       nd = find_devices("firewire");
+       while (nd) {
+               if (nd->parent && (device_is_compatible(nd, "pci106b,18") ||
+                                  device_is_compatible(nd, "pci106b,30") ||
+                                  device_is_compatible(nd, "pci11c1,5811"))
+                   && device_is_compatible(nd->parent, "uni-north")) {
+                       pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
+                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
+               }
+               nd = nd->next;
+       }
+       /* Power down GMAC ethernet cells under uni-north */
+       nd = find_devices("ethernet");
+       while (nd) {
+               if (nd->parent && device_is_compatible(nd, "gmac")
+                   && device_is_compatible(nd->parent, "uni-north"))
+                       pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
+               nd = nd->next;
+       }
+}
+
+#ifdef CONFIG_PPC32
+/*
+ * Fix the interrupt routing on the various TI cardbus bridges used on
+ * powerbooks.  The two device families want different register pokes,
+ * so dispatch on the device ID; both also get ISA interrupt mode
+ * disabled (config reg 0x92).
+ */
+void pmac_pci_fixup_cardbus(struct pci_dev* dev)
+{
+       u8 val;
+
+       if (_machine != _MACH_Pmac)
+               return;
+       if (dev->vendor != PCI_VENDOR_ID_TI)
+               return;
+
+       switch (dev->device) {
+       case PCI_DEVICE_ID_TI_1130:
+       case PCI_DEVICE_ID_TI_1131:
+               /* Enable PCI interrupt */
+               if (pci_read_config_byte(dev, 0x91, &val) == 0)
+                       pci_write_config_byte(dev, 0x91, val | 0x30);
+               /* Disable ISA interrupt mode */
+               if (pci_read_config_byte(dev, 0x92, &val) == 0)
+                       pci_write_config_byte(dev, 0x92, val & ~0x06);
+               break;
+       case PCI_DEVICE_ID_TI_1210:
+       case PCI_DEVICE_ID_TI_1211:
+       case PCI_DEVICE_ID_TI_1410:
+       case PCI_DEVICE_ID_TI_1510:
+               /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
+                  signal out the MFUNC0 pin */
+               if (pci_read_config_byte(dev, 0x8c, &val) == 0)
+                       pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
+               /* Disable ISA interrupt mode */
+               if (pci_read_config_byte(dev, 0x92, &val) == 0)
+                       pci_write_config_byte(dev, 0x92, val & ~0x06);
+               break;
+       }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
+
+/* Final fixup applied to every PCI device on PowerMacs: switch PCI ATA
+ * controllers into fully native mode by setting both "native" bits
+ * (0x05) in the PCI_CLASS_PROG register, then verify the write stuck.
+ */
+void pmac_pci_fixup_pciata(struct pci_dev* dev)
+{
+       u8 progif = 0;
+
+       /*
+        * On PowerMacs, we try to switch any PCI ATA controller to
+       * fully native mode
+        */
+       if (_machine != _MACH_Pmac)
+               return;
+       /* Some controllers don't have the class IDE */
+       if (dev->vendor == PCI_VENDOR_ID_PROMISE)
+               switch(dev->device) {
+               case PCI_DEVICE_ID_PROMISE_20246:
+               case PCI_DEVICE_ID_PROMISE_20262:
+               case PCI_DEVICE_ID_PROMISE_20263:
+               case PCI_DEVICE_ID_PROMISE_20265:
+               case PCI_DEVICE_ID_PROMISE_20267:
+               case PCI_DEVICE_ID_PROMISE_20268:
+               case PCI_DEVICE_ID_PROMISE_20269:
+               case PCI_DEVICE_ID_PROMISE_20270:
+               case PCI_DEVICE_ID_PROMISE_20271:
+               case PCI_DEVICE_ID_PROMISE_20275:
+               case PCI_DEVICE_ID_PROMISE_20276:
+               case PCI_DEVICE_ID_PROMISE_20277:
+                       goto good;
+               }
+       /* Others, check PCI class */
+       if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+               return;
+ good:
+       pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+       if ((progif & 5) != 5) {
+               printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
+               (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
+               /* Read back: some controllers hard-wire the progif bits */
+               if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+                   (progif & 5) != 5)
+                       printk(KERN_ERR "Rewrite of PROGIF failed !\n");
+       }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
+#endif
+
+/*
+ * Disable second function on K2-SATA, it's broken
+ * and disable IO BARs on first one
+ */
+static void fixup_k2_sata(struct pci_dev* dev)
+{
+       int i;
+       u16 cmd;
+
+       if (PCI_FUNC(dev->devfn) > 0) {
+               /* Function 1+: disable IO and memory decode and wipe all
+                * 6 BARs (resource structs and config space) so the core
+                * never assigns resources to the broken function.
+                */
+               pci_read_config_word(dev, PCI_COMMAND, &cmd);
+               cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+               pci_write_config_word(dev, PCI_COMMAND, cmd);
+               for (i = 0; i < 6; i++) {
+                       dev->resource[i].start = dev->resource[i].end = 0;
+                       dev->resource[i].flags = 0;
+                       pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
+               }
+       } else {
+               /* Function 0: keep memory decode but drop IO and clear the
+                * first 5 BARs (the IO BARs); the last BAR is left alone.
+                */
+               pci_read_config_word(dev, PCI_COMMAND, &cmd);
+               cmd &= ~PCI_COMMAND_IO;
+               pci_write_config_word(dev, PCI_COMMAND, cmd);
+               for (i = 0; i < 5; i++) {
+                       dev->resource[i].start = dev->resource[i].end = 0;
+                       dev->resource[i].flags = 0;
+                       pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
+               }
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
+
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
new file mode 100644 (file)
index 0000000..0eca17d
--- /dev/null
@@ -0,0 +1,682 @@
+/*
+ *  Support for the interrupt controllers found on Power Macintosh,
+ *  currently Apple's "Grand Central" interrupt controller in all
+ *  it's incarnations. OpenPIC support used on newer machines is
+ *  in a separate file
+ *
+ *  Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ *
+ *  Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/module.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/time.h>
+#include <asm/pmac_feature.h>
+#include <asm/mpic.h>
+
+#include "pmac.h"
+
+/*
+ * XXX this should be in xmon.h, but putting it there means xmon.h
+ * has to include <linux/interrupt.h> (to get irqreturn_t), which
+ * causes all sorts of problems.  -- paulus
+ */
+extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
+
+#ifdef CONFIG_PPC32
+/* Register bank of one 32-source cell of the Grand Central / OHare /
+ * Heathrow interrupt controllers; accessed little-endian via in_le32 /
+ * out_le32 below.
+ */
+struct pmac_irq_hw {
+        unsigned int    event;
+        unsigned int    enable;
+        unsigned int    ack;
+        unsigned int    level;
+};
+
+/* Default addresses */
+static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
+        (struct pmac_irq_hw *) 0xf3000020,
+        (struct pmac_irq_hw *) 0xf3000010,
+        (struct pmac_irq_hw *) 0xf4000020,
+        (struct pmac_irq_hw *) 0xf4000010,
+};
+
+/* Bitmasks of which sources are level-triggered on each controller type */
+#define GC_LEVEL_MASK          0x3ff00000
+#define OHARE_LEVEL_MASK       0x1ff00000
+#define HEATHROW_LEVEL_MASK    0x1ff00000
+
+/* Totals set up by pmac_pic_init(); max_irqs includes any cascaded
+ * secondary controller, max_real_irqs covers only the primary one(s).
+ */
+static int max_irqs;
+static int max_real_irqs;
+static u32 level_mask[4];
+
+static DEFINE_SPINLOCK(pmac_pic_lock);
+
+/* XXX here for now, should move to arch/powerpc/kernel/irq.c */
+int ppc_do_canonicalize_irqs;
+EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
+
+/* Backing storage for interrupt entries patched into the device tree
+ * by pmac_fix_gatwick_interrupts() below.
+ */
+#define GATWICK_IRQ_POOL_SIZE        10
+static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
+
+/*
+ * Mark an irq as "lost".  This is only used on the pmac
+ * since it can lose interrupts (see pmac_set_irq_mask).
+ * -- Cort
+ */
+void
+__set_lost(unsigned long irq_nr, int nokick)
+{
+       if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
+               atomic_inc(&ppc_n_lost_interrupts);
+               /* set_dec(1): presumably forces an imminent decrementer
+                * interrupt so the lost irq gets replayed — TODO confirm */
+               if (!nokick)
+                       set_dec(1);
+       }
+}
+
+/* Mask irq_nr in its controller cell and acknowledge it, spinning until
+ * the hardware enable register reflects the cached mask. Also drops the
+ * irq from the "lost" bookkeeping if it was marked there.
+ */
+static void
+pmac_mask_and_ack_irq(unsigned int irq_nr)
+{
+        unsigned long bit = 1UL << (irq_nr & 0x1f);
+        int i = irq_nr >> 5;
+        unsigned long flags;
+
+        if ((unsigned)irq_nr >= max_irqs)
+                return;
+
+        clear_bit(irq_nr, ppc_cached_irq_mask);
+        if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
+                atomic_dec(&ppc_n_lost_interrupts);
+       spin_lock_irqsave(&pmac_pic_lock, flags);
+        out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
+        out_le32(&pmac_irq_hw[i]->ack, bit);
+        do {
+                /* make sure ack gets to controller before we enable
+                   interrupts */
+                mb();
+        } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
+                != (ppc_cached_irq_mask[i] & bit));
+       spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+/* Push the cached mask for irq_nr's cell to the hardware and wait for it
+ * to take effect. If the line is already asserted when (re)enabled, the
+ * controller will not latch a new event, so record it as "lost" to get
+ * it replayed (see __set_lost).
+ */
+static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
+{
+        unsigned long bit = 1UL << (irq_nr & 0x1f);
+        int i = irq_nr >> 5;
+        unsigned long flags;
+
+        if ((unsigned)irq_nr >= max_irqs)
+                return;
+
+       spin_lock_irqsave(&pmac_pic_lock, flags);
+        /* enable unmasked interrupts */
+        out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
+
+        do {
+                /* make sure mask gets to controller before we
+                   return to user */
+                mb();
+        } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
+                != (ppc_cached_irq_mask[i] & bit));
+
+        /*
+         * Unfortunately, setting the bit in the enable register
+         * when the device interrupt is already on *doesn't* set
+         * the bit in the flag register or request another interrupt.
+         */
+        if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
+               __set_lost((ulong)irq_nr, nokicklost);
+       spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+/* When an irq gets requested for the first client, if it's an
+ * edge interrupt, we clear any previous one on the controller
+ */
+static unsigned int pmac_startup_irq(unsigned int irq_nr)
+{
+        unsigned long bit = 1UL << (irq_nr & 0x1f);
+        int i = irq_nr >> 5;
+
+       /* Ack stale edge events only; level sources must not be acked here */
+       if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
+               out_le32(&pmac_irq_hw[i]->ack, bit);
+        set_bit(irq_nr, ppc_cached_irq_mask);
+        pmac_set_irq_mask(irq_nr, 0);
+
+       return 0;
+}
+
+/* Disable hook: drop irq_nr from the cached mask and sync hardware */
+static void pmac_mask_irq(unsigned int irq_nr)
+{
+        clear_bit(irq_nr, ppc_cached_irq_mask);
+        pmac_set_irq_mask(irq_nr, 0);
+        mb();
+}
+
+/* Enable hook: add irq_nr to the cached mask and sync hardware */
+static void pmac_unmask_irq(unsigned int irq_nr)
+{
+        set_bit(irq_nr, ppc_cached_irq_mask);
+        pmac_set_irq_mask(irq_nr, 0);
+}
+
+/* End-of-interrupt hook: re-enable the source unless it was disabled or
+ * is still being handled; nokicklost=1 since we are inside irq context.
+ */
+static void pmac_end_irq(unsigned int irq_nr)
+{
+       if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
+           && irq_desc[irq_nr].action) {
+               set_bit(irq_nr, ppc_cached_irq_mask);
+               pmac_set_irq_mask(irq_nr, 1);
+       }
+}
+
+
+/* irq_desc handler ops for the primary PowerMac PIC and for the cascaded
+ * Gatwick controller; both use the same cell-indexed helpers above.
+ */
+struct hw_interrupt_type pmac_pic = {
+       .typename       = " PMAC-PIC ",
+       .startup        = pmac_startup_irq,
+       .enable         = pmac_unmask_irq,
+       .disable        = pmac_mask_irq,
+       .ack            = pmac_mask_and_ack_irq,
+       .end            = pmac_end_irq,
+};
+
+struct hw_interrupt_type gatwick_pic = {
+       .typename       = " GATWICK  ",
+       .startup        = pmac_startup_irq,
+       .enable         = pmac_unmask_irq,
+       .disable        = pmac_mask_irq,
+       .ack            = pmac_mask_and_ack_irq,
+       .end            = pmac_end_irq,
+};
+
+/* Cascade handler for the Gatwick: scan the secondary controller's cells
+ * (irqs [max_real_irqs, max_irqs) in 32-bit banks, highest bank first),
+ * dispatch the highest pending source, and report whether one was found.
+ */
+static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+       int irq, bits;
+
+       for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
+               int i = irq >> 5;
+               bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
+               /* We must read level interrupts from the level register */
+               bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+               bits &= ppc_cached_irq_mask[i];
+               if (bits == 0)
+                       continue;
+               irq += __ilog2(bits);
+               __do_IRQ(irq, regs);
+               return IRQ_HANDLED;
+       }
+       printk("gatwick irq not from gatwick pic\n");
+       return IRQ_NONE;
+}
+
+/* ppc_md.get_irq hook: return the highest pending irq number on the
+ * primary controller(s), or a negative value if none (or, on SMP
+ * powersurge, -2 after consuming an IPI on a secondary CPU).
+ */
+int
+pmac_get_irq(struct pt_regs *regs)
+{
+       int irq;
+       unsigned long bits = 0;
+
+#ifdef CONFIG_SMP
+       void psurge_smp_message_recv(struct pt_regs *);
+
+               /* IPI's are a hack on the powersurge -- Cort */
+               if ( smp_processor_id() != 0 ) {
+               psurge_smp_message_recv(regs);
+               return -2;      /* ignore, already handled */
+        }
+#endif /* CONFIG_SMP */
+       /* Scan banks high to low; lost interrupts are merged back in */
+       for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
+               int i = irq >> 5;
+               bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
+               /* We must read level interrupts from the level register */
+               bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+               bits &= ppc_cached_irq_mask[i];
+               if (bits == 0)
+                       continue;
+               irq += __ilog2(bits);
+               break;
+       }
+
+       return irq;
+}
+
+/* This routine will fix some missing interrupt values in the device tree
+ * on the gatwick mac-io controller used by some PowerBooks.
+ *
+ * Missing interrupt_info entries are handed out from the static
+ * gatwick_int_pool; irq lines are rebased by irq_base (the cascade
+ * offset of the secondary controller).
+ */
+static void __init
+pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
+{
+       struct device_node *node;
+       int count;
+
+       memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
+       node = gw->child;
+       count = 0;
+       while(node)
+       {
+               /* Fix SCC */
+               if (strcasecmp(node->name, "escc") == 0)
+                       if (node->child) {
+                               if (node->child->n_intrs < 3) {
+                                       node->child->intrs = &gatwick_int_pool[count];
+                                       count += 3;
+                               }
+                               node->child->n_intrs = 3;
+                               node->child->intrs[0].line = 15+irq_base;
+                               node->child->intrs[1].line =  4+irq_base;
+                               node->child->intrs[2].line =  5+irq_base;
+                               printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
+                                       node->child->intrs[0].line,
+                                       node->child->intrs[1].line,
+                                       node->child->intrs[2].line);
+                       }
+               /* Fix media-bay & left SWIM */
+               if (strcasecmp(node->name, "media-bay") == 0) {
+                       struct device_node* ya_node;
+
+                       if (node->n_intrs == 0)
+                               node->intrs = &gatwick_int_pool[count++];
+                       node->n_intrs = 1;
+                       node->intrs[0].line = 29+irq_base;
+                       printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
+                                       node->intrs[0].line);
+
+                       ya_node = node->child;
+                       while(ya_node)
+                       {
+                               if (strcasecmp(ya_node->name, "floppy") == 0) {
+                                       if (ya_node->n_intrs < 2) {
+                                               ya_node->intrs = &gatwick_int_pool[count];
+                                               count += 2;
+                                       }
+                                       ya_node->n_intrs = 2;
+                                       ya_node->intrs[0].line = 19+irq_base;
+                                       ya_node->intrs[1].line =  1+irq_base;
+                                       printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
+                                               ya_node->intrs[0].line, ya_node->intrs[1].line);
+                               }
+                               if (strcasecmp(ya_node->name, "ata4") == 0) {
+                                       if (ya_node->n_intrs < 2) {
+                                               ya_node->intrs = &gatwick_int_pool[count];
+                                               count += 2;
+                                       }
+                                       ya_node->n_intrs = 2;
+                                       ya_node->intrs[0].line = 14+irq_base;
+                                       ya_node->intrs[1].line =  3+irq_base;
+                                       printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
+                                               ya_node->intrs[0].line, ya_node->intrs[1].line);
+                               }
+                               ya_node = ya_node->sibling;
+                       }
+               }
+               node = node->sibling;
+       }
+       /* Use the named pool-size constant, not a magic 10, so the check
+        * stays correct if GATWICK_IRQ_POOL_SIZE is ever changed.
+        */
+       if (count > GATWICK_IRQ_POOL_SIZE) {
+               printk("WARNING !! Gatwick interrupt pool overflow\n");
+               printk("  GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
+               printk("              requested = %d\n", count);
+       }
+}
+
+/*
+ * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
+ * card which includes an ohare chip that acts as a second interrupt
+ * controller.  If we find this second ohare, set it up and fix the
+ * interrupt value in the device tree for the ethernet chip.
+ */
+static int __init enable_second_ohare(void)
+{
+       unsigned char bus, devfn;
+       unsigned short cmd;
+        unsigned long addr;
+       struct device_node *irqctrler = find_devices("pci106b,7");
+       struct device_node *ether;
+
+       if (irqctrler == NULL || irqctrler->n_addrs <= 0)
+               return -1;
+       /* Map the second ohare's registers and register it as cell 1 */
+       addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
+       pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
+       max_irqs = 64;
+       /* Enable memory decode/bus-mastering on the chip via early config
+        * access (the PCI layer hasn't claimed it yet)
+        */
+       if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
+               struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
+               if (!hose)
+                   printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+               else {
+                   early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+                   cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+                   cmd &= ~PCI_COMMAND_IO;
+                   early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+               }
+       }
+
+       /* Fix interrupt for the modem/ethernet combo controller. The number
+          in the device tree (27) is bogus (correct for the ethernet-only
+          board but not the combo ethernet/modem board).
+          The real interrupt is 28 on the second controller -> 28+32 = 60.
+       */
+       ether = find_devices("pci1011,14");
+       if (ether && ether->n_intrs > 0) {
+               ether->intrs[0].line = 60;
+               printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
+                      ether->intrs[0].line);
+       }
+
+       /* Return the interrupt number of the cascade */
+       return irqctrler->intrs[0].line;
+}
+
+#ifdef CONFIG_XMON
+static struct irqaction xmon_action = {
+       .handler        = xmon_irq,
+       .flags          = 0,
+       .mask           = CPU_MASK_NONE,
+       .name           = "NMI - XMON"
+};
+#endif
+
+static struct irqaction gatwick_cascade_action = {
+       .handler        = gatwick_action,
+       .flags          = SA_INTERRUPT,
+       .mask           = CPU_MASK_NONE,
+       .name           = "cascade",
+};
+#endif /* CONFIG_PPC32 */
+
+/* Cascade callback: fetch one pending irq from the slave U3 MPIC */
+static int pmac_u3_cascade(struct pt_regs *regs, void *data)
+{
+       return mpic_get_one_irq((struct mpic *)data, regs);
+}
+
+/* Probe and initialize the PowerMac interrupt controller(s).
+ *
+ * Tries OpenPIC (Core99 MPIC, plus an optional slave U3 MPIC) first and
+ * returns early if found; otherwise (PPC32 only) falls back to the older
+ * GC / OHare / Heathrow controllers, including an optional cascaded
+ * second controller (Gatwick or second OHare).
+ */
+void __init pmac_pic_init(void)
+{
+        struct device_node *irqctrler  = NULL;
+        struct device_node *irqctrler2 = NULL;
+       struct device_node *np;
+#ifdef CONFIG_PPC32
+        int i;
+        unsigned long addr;
+       int irq_cascade = -1;
+#endif
+       struct mpic *mpic1, *mpic2;
+
+       /* We first try to detect Apple's new Core99 chipset, since mac-io
+        * is quite different on those machines and contains an IBM MPIC2.
+        */
+       np = find_type_devices("open-pic");
+       while (np) {
+               if (np->parent && !strcmp(np->parent->name, "u3"))
+                       irqctrler2 = np;
+               else
+                       irqctrler = np;
+               np = np->next;
+       }
+       if (irqctrler != NULL && irqctrler->n_addrs > 0) {
+               unsigned char senses[128];
+
+               printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
+                      (unsigned int)irqctrler->addrs[0].address);
+               pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
+
+               prom_get_irq_senses(senses, 0, 128);
+               mpic1 = mpic_alloc(irqctrler->addrs[0].address,
+                                  MPIC_PRIMARY | MPIC_WANTS_RESET,
+                                  0, 0, 128, 252, senses, 128, " OpenPIC  ");
+               BUG_ON(mpic1 == NULL);
+               mpic_init(mpic1);               
+
+               /* Optional slave MPIC inside the U3 bridge, cascaded off
+                * the primary via pmac_u3_cascade
+                */
+               if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
+                   irqctrler2->n_addrs > 0) {
+                       printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
+                              (u32)irqctrler2->addrs[0].address,
+                              irqctrler2->intrs[0].line);
+
+                       pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
+                       prom_get_irq_senses(senses, 128, 128 + 124);
+
+                       /* We don't need to set MPIC_BROKEN_U3 here since we don't have
+                        * hypertransport interrupts routed to it
+                        */
+                       mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
+                                          MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
+                                          0, 128, 124, 0, senses, 124,
+                                          " U3-MPIC  ");
+                       BUG_ON(mpic2 == NULL);
+                       mpic_init(mpic2);
+                       mpic_setup_cascade(irqctrler2->intrs[0].line,
+                                          pmac_u3_cascade, mpic2);
+               }
+#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
+               {
+                       struct device_node* pswitch;
+                       int nmi_irq;
+
+                       /* Wire the programmer switch as the xmon NMI */
+                       pswitch = find_devices("programmer-switch");
+                       if (pswitch && pswitch->n_intrs) {
+                               nmi_irq = pswitch->intrs[0].line;
+                               mpic_irq_set_priority(nmi_irq, 9);
+                               setup_irq(nmi_irq, &xmon_action);
+                       }
+               }
+#endif /* CONFIG_XMON */
+               return;
+       }
+       irqctrler = NULL;
+
+#ifdef CONFIG_PPC32
+       /* Get the level/edge settings, assume if it's not
+        * a Grand Central nor an OHare, then it's an Heathrow
+        * (or Paddington).
+        */
+       ppc_md.get_irq = pmac_get_irq;
+       if (find_devices("gc"))
+               level_mask[0] = GC_LEVEL_MASK;
+       else if (find_devices("ohare")) {
+               level_mask[0] = OHARE_LEVEL_MASK;
+               /* We might have a second cascaded ohare */
+               level_mask[1] = OHARE_LEVEL_MASK;
+       } else {
+               level_mask[0] = HEATHROW_LEVEL_MASK;
+               level_mask[1] = 0;
+               /* We might have a second cascaded heathrow */
+               level_mask[2] = HEATHROW_LEVEL_MASK;
+               level_mask[3] = 0;
+       }
+
+       /*
+        * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
+        * 1998 G3 Series PowerBooks have 128,
+        * other powermacs have 32.
+        * The combo ethernet/modem card for the Powerstar powerbooks
+        * (2400/3400/3500, ohare based) has a second ohare chip
+        * effectively making a total of 64.
+        */
+       max_irqs = max_real_irqs = 32;
+       irqctrler = find_devices("mac-io");
+       if (irqctrler)
+       {
+               max_real_irqs = 64;
+               if (irqctrler->next)
+                       max_irqs = 128;
+               else
+                       max_irqs = 64;
+       }
+       for ( i = 0; i < max_real_irqs ; i++ )
+               irq_desc[i].handler = &pmac_pic;
+
+       /* get addresses of first controller */
+       if (irqctrler) {
+               if  (irqctrler->n_addrs > 0) {
+                       addr = (unsigned long)
+                               ioremap(irqctrler->addrs[0].address, 0x40);
+                       for (i = 0; i < 2; ++i)
+                               pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
+                                       (addr + (2 - i) * 0x10);
+               }
+
+               /* get addresses of second controller */
+               irqctrler = irqctrler->next;
+               if (irqctrler && irqctrler->n_addrs > 0) {
+                       addr = (unsigned long)
+                               ioremap(irqctrler->addrs[0].address, 0x40);
+                       for (i = 2; i < 4; ++i)
+                               pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
+                                       (addr + (4 - i) * 0x10);
+                       irq_cascade = irqctrler->intrs[0].line;
+                       if (device_is_compatible(irqctrler, "gatwick"))
+                               pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
+               }
+       } else {
+               /* older powermacs have a GC (grand central) or ohare at
+                  f3000000, with interrupt control registers at f3000020. */
+               addr = (unsigned long) ioremap(0xf3000000, 0x40);
+               pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
+       }
+
+       /* PowerBooks 3400 and 3500 can have a second controller in a second
+          ohare chip, on the combo ethernet/modem card */
+       if (machine_is_compatible("AAPL,3400/2400")
+            || machine_is_compatible("AAPL,3500"))
+               irq_cascade = enable_second_ohare();
+
+       /* disable all interrupts in all controllers */
+       for (i = 0; i * 32 < max_irqs; ++i)
+               out_le32(&pmac_irq_hw[i]->enable, 0);
+       /* mark level interrupts */
+       for (i = 0; i < max_irqs; i++)
+               if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
+                       irq_desc[i].status = IRQ_LEVEL;
+
+       /* get interrupt line of secondary interrupt controller */
+       if (irq_cascade >= 0) {
+               printk(KERN_INFO "irq: secondary controller on irq %d\n",
+                       (int)irq_cascade);
+               for ( i = max_real_irqs ; i < max_irqs ; i++ )
+                       irq_desc[i].handler = &gatwick_pic;
+               setup_irq(irq_cascade, &gatwick_cascade_action);
+       }
+       printk("System has %d possible interrupts\n", max_irqs);
+       if (max_irqs != max_real_irqs)
+               printk(KERN_DEBUG "%d interrupts on main controller\n",
+                       max_real_irqs);
+
+#ifdef CONFIG_XMON
+       setup_irq(20, &xmon_action);
+#endif /* CONFIG_XMON */
+#endif /* CONFIG_PPC32 */
+}
+
+#ifdef CONFIG_PM
+/*
+ * These procedures are used in implementing sleep on the powerbooks.
+ * sleep_save_intrs() saves the states of all interrupt enables
+ * and disables all interrupts except for the nominated one.
+ * sleep_restore_intrs() restores the states of all interrupt enables.
+ */
+unsigned long sleep_save_mask[2];
+
+/* This used to be passed by the PMU driver but that link got
+ * broken with the new driver model. We use this tweak for now...
+ *
+ * Returns the via-pmu interrupt line on OHare-based machines, or -1
+ * when it can't be determined (no ADB_PMU support, wrong PMU model,
+ * or no via-pmu node in the device tree).
+ */
+static int pmacpic_find_viaint(void)
+{
+       int viaint = -1;
+
+#ifdef CONFIG_ADB_PMU
+       struct device_node *np;
+
+       if (pmu_get_model() != PMU_OHARE_BASED)
+               goto not_found;
+       np = of_find_node_by_name(NULL, "via-pmu");
+       if (np == NULL)
+               goto not_found;
+       viaint = np->intrs[0].line;
+       /* Label kept inside the #ifdef: with CONFIG_ADB_PMU unset there is
+        * no goto, and an unconditional label would trigger an
+        * unused-label warning.
+        */
+not_found:
+#endif /* CONFIG_ADB_PMU */
+       return viaint;
+}
+
+/* Sysdev suspend hook: save the current enable masks and mask everything
+ * except (when found) the via-pmu interrupt, which must stay live to
+ * complete the sleep sequence.
+ */
+static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
+{
+       int viaint = pmacpic_find_viaint();
+
+       sleep_save_mask[0] = ppc_cached_irq_mask[0];
+       sleep_save_mask[1] = ppc_cached_irq_mask[1];
+       ppc_cached_irq_mask[0] = 0;
+       ppc_cached_irq_mask[1] = 0;
+       if (viaint > 0)
+               set_bit(viaint, ppc_cached_irq_mask);
+       out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
+       if (max_real_irqs > 32)
+               out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
+       (void)in_le32(&pmac_irq_hw[0]->event);
+       /* make sure mask gets to controller before we return to caller */
+       mb();
+        (void)in_le32(&pmac_irq_hw[0]->enable);
+
+        return 0;
+}
+
+/* Sysdev resume hook: mask everything, then unmask exactly the sources
+ * that were enabled when pmacpic_suspend() saved the masks.
+ */
+static int pmacpic_resume(struct sys_device *sysdev)
+{
+       int i;
+
+       out_le32(&pmac_irq_hw[0]->enable, 0);
+       if (max_real_irqs > 32)
+               out_le32(&pmac_irq_hw[1]->enable, 0);
+       mb();
+       for (i = 0; i < max_real_irqs; ++i)
+               if (test_bit(i, sleep_save_mask))
+                       pmac_unmask_irq(i);
+
+       return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* sysdev boilerplate exposing the PIC for PM: suspend/resume hooks are
+ * only wired up when CONFIG_PM is set.
+ */
+static struct sysdev_class pmacpic_sysclass = {
+       set_kset_name("pmac_pic"),
+};
+
+static struct sys_device device_pmacpic = {
+       .id             = 0,
+       .cls            = &pmacpic_sysclass,
+};
+
+static struct sysdev_driver driver_pmacpic = {
+#ifdef CONFIG_PM
+       .suspend        = &pmacpic_suspend,
+       .resume         = &pmacpic_resume,
+#endif /* CONFIG_PM */
+};
+
+/* Register the PIC sysdev; bail out if pmac_pic_init() never found a
+ * legacy controller (max_irqs still 0, PPC32 only).
+ */
+static int __init init_pmacpic_sysfs(void)
+{
+#ifdef CONFIG_PPC32
+       if (max_irqs == 0)
+               return -ENODEV;
+#endif
+       printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
+       sysdev_class_register(&pmacpic_sysclass);
+       sysdev_register(&device_pmacpic);
+       sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
+       return 0;
+}
+
+subsys_initcall(init_pmacpic_sysfs);
+
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
new file mode 100644 (file)
index 0000000..664103d
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef __PPC_PLATFORMS_PMAC_PIC_H
+#define __PPC_PLATFORMS_PMAC_PIC_H
+
+#include <linux/irq.h>
+
+/* Interface of the PPC32 PowerMac interrupt controller (pic.c) */
+extern struct hw_interrupt_type pmac_pic;
+
+void pmac_pic_init(void);
+int pmac_get_irq(struct pt_regs *regs);
+
+#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
new file mode 100644 (file)
index 0000000..2ad25e1
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __PMAC_H__
+#define __PMAC_H__
+
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/irq.h>
+
+/*
+ * Declaration for the various functions exported by the
+ * pmac_* files. Mostly for use by pmac_setup
+ */
+
+struct rtc_time;
+
+extern long pmac_time_init(void);
+extern unsigned long pmac_get_boot_time(void);
+extern void pmac_get_rtc_time(struct rtc_time *);
+extern int pmac_set_rtc_time(struct rtc_time *);
+extern void pmac_read_rtc_time(void);
+extern void pmac_calibrate_decr(void);
+extern void pmac_pcibios_fixup(void);
+extern void pmac_pci_init(void);
+extern unsigned long pmac_ide_get_base(int index);
+extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
+       unsigned long data_port, unsigned long ctrl_port, int *irq);
+
+extern void pmac_nvram_update(void);
+extern unsigned char pmac_nvram_read_byte(int addr);
+extern void pmac_nvram_write_byte(int addr, unsigned char val);
+extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
+extern void pmac_pcibios_after_init(void);
+extern int of_show_percpuinfo(struct seq_file *m, int i);
+
+/* NOTE: pmac_pci_init, pmac_ide_get_base and pmac_ide_init_hwif_ports
+ * used to be declared a second time here; the duplicates were removed.
+ */
+extern void pmac_setup_pci_dma(void);
+extern void pmac_check_ht_link(void);
+
+extern void pmac_setup_smp(void);
+
+extern int pmac_nvram_init(void);
+
+extern struct hw_interrupt_type pmac_pic;
+
+void pmac_pic_init(void);
+int pmac_get_irq(struct pt_regs *regs);
+
+#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
new file mode 100644 (file)
index 0000000..50f5dd7
--- /dev/null
@@ -0,0 +1,789 @@
+/*
+ *  Powermac setup and early boot code plus other random bits.
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Adapted for Power Macintosh by Paul Mackerras
+ *    Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
+ *
+ *  Derived from "arch/alpha/kernel/setup.c"
+ *    Copyright (C) 1995 Linus Torvalds
+ *
+ *  Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+/*
+ * bootup setup stuff..
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/initrd.h>
+#include <linux/vt_kern.h>
+#include <linux/console.h>
+#include <linux/ide.h>
+#include <linux/pci.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/bitops.h>
+#include <linux/suspend.h>
+
+#include <asm/reg.h>
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+#include <asm/ohare.h>
+#include <asm/mediabay.h>
+#include <asm/machdep.h>
+#include <asm/dma.h>
+#include <asm/cputable.h>
+#include <asm/btext.h>
+#include <asm/pmac_feature.h>
+#include <asm/time.h>
+#include <asm/of_device.h>
+#include <asm/mmu_context.h>
+#include <asm/iommu.h>
+#include <asm/smu.h>
+#include <asm/pmc.h>
+#include <asm/mpic.h>
+
+#include "pmac.h"
+
+#undef SHOW_GATWICK_IRQS
+
+unsigned char drive_info;
+
+/* L2 cache control override, parsed from the "l2cr-value" OF property
+ * in l2cr_init() and applied via _set_L2CR(). */
+int ppc_override_l2cr = 0;
+int ppc_override_l2cr_value;
+int has_l2cache = 0;
+
+int pmac_newworld = 1;
+
+/* Goodness of the currently selected root device; see note_bootable_part() */
+static int current_root_goodness = -1;
+
+/* NOTE(review): this extern is redundant — pmac_newworld is defined just
+ * above in this same file. */
+extern int pmac_newworld;
+extern struct machdep_calls pmac_md;
+
+#define DEFAULT_ROOT_DEVICE Root_SDA1  /* sda1 - slightly silly choice */
+
+#ifdef CONFIG_PPC64
+#include <asm/udbg.h>
+int sccdbg;
+#endif
+
+extern void zs_kgdb_hook(int tty_num);
+
+/* Which system controller (CUDA/PMU/SMU) this machine uses; consulted by
+ * pmac_restart()/pmac_power_off() below. */
+sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
+EXPORT_SYMBOL(sys_ctrler);
+
+#ifdef CONFIG_PMAC_SMU
+unsigned long smu_cmdbuf_abs;
+EXPORT_SYMBOL(smu_cmdbuf_abs);
+#endif
+
+#ifdef CONFIG_SMP
+extern struct smp_ops_t psurge_smp_ops;
+extern struct smp_ops_t core99_smp_ops;
+#endif /* CONFIG_SMP */
+
+/*
+ * Emit PowerMac-specific lines into /proc/cpuinfo: machine model,
+ * motherboard compatible list, parsed motherboard info, L2 cache
+ * description and the NewWorld/OldWorld generation.
+ */
+static void pmac_show_cpuinfo(struct seq_file *m)
+{
+       struct device_node *np;
+       char *pp;
+       int plen;
+       int mbmodel;
+       unsigned int mbflags;
+       char* mbname;
+
+       /* Query the platform-feature layer for motherboard identification */
+       mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
+                                   PMAC_MB_INFO_MODEL, 0);
+       mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
+                                   PMAC_MB_INFO_FLAGS, 0);
+       if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
+                             (long) &mbname) != 0)
+               mbname = "Unknown";
+
+       /* find motherboard type from the device-tree root node */
+       seq_printf(m, "machine\t\t: ");
+       np = of_find_node_by_path("/");
+       if (np != NULL) {
+               pp = (char *) get_property(np, "model", NULL);
+               if (pp != NULL)
+                       seq_printf(m, "%s\n", pp);
+               else
+                       seq_printf(m, "PowerMac\n");
+               /* "compatible" is a list of NUL-separated strings, plen is
+                * the total byte length; walk string by string. */
+               pp = (char *) get_property(np, "compatible", &plen);
+               if (pp != NULL) {
+                       seq_printf(m, "motherboard\t:");
+                       while (plen > 0) {
+                               int l = strlen(pp) + 1;
+                               seq_printf(m, " %s", pp);
+                               plen -= l;
+                               pp += l;
+                       }
+                       seq_printf(m, "\n");
+               }
+               of_node_put(np);
+       } else
+               seq_printf(m, "PowerMac\n");
+
+       /* print parsed model */
+       seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
+       seq_printf(m, "pmac flags\t: %08x\n", mbflags);
+
+       /* find l2 cache info; fall back to any "cache" node if there is
+        * no node named "l2-cache" */
+       np = of_find_node_by_name(NULL, "l2-cache");
+       if (np == NULL)
+               np = of_find_node_by_type(NULL, "cache");
+       if (np != NULL) {
+               unsigned int *ic = (unsigned int *)
+                       get_property(np, "i-cache-size", NULL);
+               unsigned int *dc = (unsigned int *)
+                       get_property(np, "d-cache-size", NULL);
+               seq_printf(m, "L2 cache\t:");
+               has_l2cache = 1;
+               if (get_property(np, "cache-unified", NULL) != 0 && dc) {
+                       seq_printf(m, " %dK unified", *dc / 1024);
+               } else {
+                       /* split cache: report I and D sides separately */
+                       if (ic)
+                               seq_printf(m, " %dK instruction", *ic / 1024);
+                       if (dc)
+                               seq_printf(m, "%s %dK data",
+                                          (ic? " +": ""), *dc / 1024);
+               }
+               pp = get_property(np, "ram-type", NULL);
+               if (pp)
+                       seq_printf(m, " %s", pp);
+               seq_printf(m, "\n");
+               of_node_put(np);
+       }
+
+       /* Indicate newworld/oldworld */
+       seq_printf(m, "pmac-generation\t: %s\n",
+                  pmac_newworld ? "NewWorld" : "OldWorld");
+}
+
+/*
+ * Per-CPU /proc/cpuinfo lines for CPU @i: prefer the live cpufreq
+ * frequency when available, otherwise fall back to the generic
+ * device-tree based report on 32-bit.
+ */
+static void pmac_show_percpuinfo(struct seq_file *m, int i)
+{
+#ifdef CONFIG_CPU_FREQ_PMAC
+       extern unsigned int pmac_get_one_cpufreq(int i);
+       unsigned int freq = pmac_get_one_cpufreq(i);
+       if (freq != 0) {
+               seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
+               return;
+       }
+#endif /* CONFIG_CPU_FREQ_PMAC */
+#ifdef CONFIG_PPC32
+       of_show_percpuinfo(m, i);
+#endif
+}
+
+#ifndef CONFIG_ADB_CUDA
+/*
+ * Stub used when CUDA support is not compiled in: warn if the
+ * hardware is nevertheless CUDA-based.  Always returns 0.
+ */
+int find_via_cuda(void)
+{
+       if (!find_devices("via-cuda"))
+               return 0;
+       printk("WARNING ! Your machine is CUDA-based but your kernel\n");
+       printk("          wasn't compiled with CONFIG_ADB_CUDA option !\n");
+       return 0;
+}
+#endif
+
+#ifndef CONFIG_ADB_PMU
+/*
+ * Stub used when PMU support is not compiled in: warn if the
+ * hardware is nevertheless PMU-based.  Always returns 0, matching
+ * the find_via_cuda() stub above.
+ */
+int find_via_pmu(void)
+{
+       if (!find_devices("via-pmu"))
+               return 0;
+       printk("WARNING ! Your machine is PMU-based but your kernel\n");
+       printk("          wasn't compiled with CONFIG_ADB_PMU option !\n");
+       /* was a bare "return;" — invalid in a function returning int */
+       return 0;
+}
+#endif
+
+#ifndef CONFIG_PMAC_SMU
+int smu_init(void)
+{
+       /* should check and warn if SMU is present */
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PPC32
+static volatile u32 *sysctrl_regs;
+
+static void __init ohare_init(void)
+{
+       /* this area has the CPU identification register
+          and some registers used by smp boards */
+       sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
+
+       /*
+        * Turn on the L2 cache.
+        * We assume that we have a PSX memory controller iff
+        * we have an ohare I/O controller.
+        */
+       if (find_devices("ohare") != NULL) {
+               if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
+                       if (sysctrl_regs[4] & 0x10)
+                               sysctrl_regs[4] |= 0x04000020;
+                       else
+                               sysctrl_regs[4] |= 0x04000000;
+                       if(has_l2cache)
+                               printk(KERN_INFO "Level 2 cache enabled\n");
+               }
+       }
+}
+
+static void __init l2cr_init(void)
+{
+       /* Checks "l2cr-value" property in the registry */
+       if (cpu_has_feature(CPU_FTR_L2CR)) {
+               struct device_node *np = find_devices("cpus");
+               if (np == 0)
+                       np = find_type_devices("cpu");
+               if (np != 0) {
+                       unsigned int *l2cr = (unsigned int *)
+                               get_property(np, "l2cr-value", NULL);
+                       if (l2cr != 0) {
+                               ppc_override_l2cr = 1;
+                               ppc_override_l2cr_value = *l2cr;
+                               _set_L2CR(0);
+                               _set_L2CR(ppc_override_l2cr_value);
+                       }
+               }
+       }
+
+       if (ppc_override_l2cr)
+               printk(KERN_INFO "L2CR overridden (0x%x), "
+                      "backside cache is %s\n",
+                      ppc_override_l2cr_value,
+                      (ppc_override_l2cr_value & 0x80000000)
+                               ? "enabled" : "disabled");
+}
+#endif
+
+void __init pmac_setup_arch(void)
+{
+       struct device_node *cpu;
+       int *fp;
+       unsigned long pvr;
+
+       pvr = PVR_VER(mfspr(SPRN_PVR));
+
+       /* Set loops_per_jiffy to a half-way reasonable value,
+          for use until calibrate_delay gets called. */
+       loops_per_jiffy = 50000000 / HZ;
+       cpu = of_find_node_by_type(NULL, "cpu");
+       if (cpu != NULL) {
+               fp = (int *) get_property(cpu, "clock-frequency", NULL);
+               if (fp != NULL) {
+                       if (pvr >= 0x30 && pvr < 0x80)
+                               /* PPC970 etc. */
+                               loops_per_jiffy = *fp / (3 * HZ);
+                       else if (pvr == 4 || pvr >= 8)
+                               /* 604, G3, G4 etc. */
+                               loops_per_jiffy = *fp / HZ;
+                       else
+                               /* 601, 603, etc. */
+                               loops_per_jiffy = *fp / (2 * HZ);
+               }
+               of_node_put(cpu);
+       }
+
+       /* Lookup PCI hosts */
+       pmac_pci_init();
+
+#ifdef CONFIG_PPC32
+       ohare_init();
+       l2cr_init();
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+       /* Probe motherboard chipset */
+       /* this is done earlier in setup_arch for 32-bit */
+       pmac_feature_init();
+
+       /* We can NAP */
+       powersave_nap = 1;
+       printk(KERN_INFO "Using native/NAP idle loop\n");
+#endif
+
+#ifdef CONFIG_KGDB
+       zs_kgdb_hook(0);
+#endif
+
+       find_via_cuda();
+       find_via_pmu();
+       smu_init();
+
+#ifdef CONFIG_NVRAM
+       pmac_nvram_init();
+#endif
+
+#ifdef CONFIG_PPC32
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (initrd_start)
+               ROOT_DEV = Root_RAM0;
+       else
+#endif
+               ROOT_DEV = DEFAULT_ROOT_DEVICE;
+#endif
+
+#ifdef CONFIG_SMP
+       /* Check for Core99 */
+       if (find_devices("uni-n") || find_devices("u3"))
+               smp_ops = &core99_smp_ops;
+#ifdef CONFIG_PPC32
+       else
+               smp_ops = &psurge_smp_ops;
+#endif
+#endif /* CONFIG_SMP */
+}
+
+char *bootpath;
+char *bootdevice;
+void *boot_host;
+int boot_target;
+int boot_part;
+extern dev_t boot_dev;
+
+#ifdef CONFIG_SCSI
+void __init note_scsi_host(struct device_node *node, void *host)
+{
+       int l;
+       char *p;
+
+       l = strlen(node->full_name);
+       if (bootpath != NULL && bootdevice != NULL
+           && strncmp(node->full_name, bootdevice, l) == 0
+           && (bootdevice[l] == '/' || bootdevice[l] == 0)) {
+               boot_host = host;
+               /*
+                * There's a bug in OF 1.0.5.  (Why am I not surprised.)
+                * If you pass a path like scsi/sd@1:0 to canon, it returns
+                * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
+                * That is, the scsi target number doesn't get preserved.
+                * So we pick the target number out of bootpath and use that.
+                */
+               p = strstr(bootpath, "/sd@");
+               if (p != NULL) {
+                       p += 4;
+                       boot_target = simple_strtoul(p, NULL, 10);
+                       p = strchr(p, ':');
+                       if (p != NULL)
+                               boot_part = simple_strtoul(p + 1, NULL, 10);
+               }
+       }
+}
+EXPORT_SYMBOL(note_scsi_host);
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
+/*
+ * Map the OF boot device path (global "bootdevice") to a dev_t by
+ * handing the path prefix (up to the last '/') to the pmac IDE layer.
+ * Returns 0 when no boot device path is known.
+ */
+static dev_t __init find_ide_boot(void)
+{
+       char *p;
+       int n;
+       /* NOTE(review): local prototype with __init and a parameter that
+        * shadows the global "bootdevice"; presumably belongs in a shared
+        * header — verify against the pmac IDE driver. */
+       dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
+
+       if (bootdevice == NULL)
+               return 0;
+       p = strrchr(bootdevice, '/');
+       if (p == NULL)
+               return 0;
+       n = p - bootdevice;     /* length of the directory part of the path */
+
+       return pmac_find_ide_boot(bootdevice, n);
+}
+#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
+
+static void __init find_boot_device(void)
+{
+#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
+       boot_dev = find_ide_boot();
+#endif
+}
+
+/* TODO: Merge the suspend-to-ram with the common code !!!
+ * currently, this is a stub implementation for suspend-to-disk
+ * only
+ */
+
+#ifdef CONFIG_SOFTWARE_SUSPEND
+
+static int pmac_pm_prepare(suspend_state_t state)
+{
+       printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
+
+       return 0;
+}
+
+static int pmac_pm_enter(suspend_state_t state)
+{
+       printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
+
+       /* Giveup the lazy FPU & vec so we don't have to back them
+        * up from the low level code
+        */
+       enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+       if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
+               enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+       return 0;
+}
+
+static int pmac_pm_finish(suspend_state_t state)
+{
+       printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
+
+       /* Restore userland MMU context */
+       set_context(current->active_mm->context, current->active_mm->pgd);
+
+       return 0;
+}
+
+static struct pm_ops pmac_pm_ops = {
+       .pm_disk_mode   = PM_DISK_SHUTDOWN,
+       .prepare        = pmac_pm_prepare,
+       .enter          = pmac_pm_enter,
+       .finish         = pmac_pm_finish,
+};
+
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+
+static int initializing = 1;
+
+static int pmac_late_init(void)
+{
+       initializing = 0;
+#ifdef CONFIG_SOFTWARE_SUSPEND
+       pm_set_ops(&pmac_pm_ops);
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+       return 0;
+}
+
+late_initcall(pmac_late_init);
+
+/* can't be __init - can be called whenever a disk is first accessed */
+/*
+ * Called by the partition-scanning code when a possibly-bootable
+ * partition is found; picks ROOT_DEV from the partition whose
+ * "goodness" is highest, unless the user gave root= on the command
+ * line or boot has already finished (see pmac_late_init()).
+ */
+void note_bootable_part(dev_t dev, int part, int goodness)
+{
+       static int found_boot = 0;
+       char *p;
+
+       if (!initializing)
+               return;
+       if ((goodness <= current_root_goodness) &&
+           ROOT_DEV != DEFAULT_ROOT_DEVICE)
+               return;
+       /* an explicit root= on the command line always wins */
+       p = strstr(saved_command_line, "root=");
+       if (p != NULL && (p == saved_command_line || p[-1] == ' '))
+               return;
+
+       if (!found_boot) {
+               find_boot_device();
+               found_boot = 1;
+       }
+       /* accept either "no known boot device" or a match with it */
+       if (!boot_dev || dev == boot_dev) {
+               ROOT_DEV = dev + part;
+               boot_dev = 0;
+               current_root_goodness = goodness;
+       }
+}
+
+#ifdef CONFIG_ADB_CUDA
+static void cuda_restart(void)
+{
+       struct adb_request req;
+
+       cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
+       for (;;)
+               cuda_poll();
+}
+
+static void cuda_shutdown(void)
+{
+       struct adb_request req;
+
+       cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
+       for (;;)
+               cuda_poll();
+}
+
+#else
+#define cuda_restart()
+#define cuda_shutdown()
+#endif
+
+#ifndef CONFIG_ADB_PMU
+#define pmu_restart()
+#define pmu_shutdown()
+#endif
+
+#ifndef CONFIG_PMAC_SMU
+#define smu_restart()
+#define smu_shutdown()
+#endif
+
+static void pmac_restart(char *cmd)
+{
+       switch (sys_ctrler) {
+       case SYS_CTRLER_CUDA:
+               cuda_restart();
+               break;
+       case SYS_CTRLER_PMU:
+               pmu_restart();
+               break;
+       case SYS_CTRLER_SMU:
+               smu_restart();
+               break;
+       default: ;
+       }
+}
+
+static void pmac_power_off(void)
+{
+       switch (sys_ctrler) {
+       case SYS_CTRLER_CUDA:
+               cuda_shutdown();
+               break;
+       case SYS_CTRLER_PMU:
+               pmu_shutdown();
+               break;
+       case SYS_CTRLER_SMU:
+               smu_shutdown();
+               break;
+       default: ;
+       }
+}
+
+static void
+pmac_halt(void)
+{
+       pmac_power_off();
+}
+
+#ifdef CONFIG_PPC32
+void __init pmac_init(void)
+{
+       /* isa_io_base gets set in pmac_pci_init */
+       isa_mem_base = PMAC_ISA_MEM_BASE;
+       pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
+       ISA_DMA_THRESHOLD = ~0L;
+       DMA_MODE_READ = 1;
+       DMA_MODE_WRITE = 2;
+
+       ppc_md = pmac_md;
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
+        ppc_ide_md.ide_init_hwif       = pmac_ide_init_hwif_ports;
+        ppc_ide_md.default_io_base     = pmac_ide_get_base;
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
+#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
+
+       if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
+
+}
+#endif
+
+/* 
+ * Early initialization.
+ */
+static void __init pmac_init_early(void)
+{
+#ifdef CONFIG_PPC64
+       /* Initialize hash table, from now on, we can take hash faults
+        * and call ioremap
+        */
+       hpte_init_native();
+
+       /* Init SCC */
+       if (strstr(cmd_line, "sccdbg")) {
+               sccdbg = 1;
+               udbg_init_scc(NULL);
+       }
+
+       /* Setup interrupt mapping options */
+       ppc64_interrupt_controller = IC_OPEN_PIC;
+
+       iommu_init_early_u3();
+#endif
+}
+
+static void __init pmac_progress(char *s, unsigned short hex)
+{
+#ifdef CONFIG_PPC64
+       if (sccdbg) {
+               udbg_puts(s);
+               udbg_puts("\n");
+               return;
+       }
+#endif
+#ifdef CONFIG_BOOTX_TEXT
+       if (boot_text_mapped) {
+               btext_drawstring(s);
+               btext_drawchar('\n');
+       }
+#endif /* CONFIG_BOOTX_TEXT */
+}
+
+/*
+ * pmac has no legacy IO, anything calling this function has to
+ * fail or bad things will happen
+ */
+static int pmac_check_legacy_ioport(unsigned int baseport)
+{
+       /* unconditionally reject: no ISA/legacy ports on PowerMac */
+       return -ENODEV;
+}
+
+static int __init pmac_declare_of_platform_devices(void)
+{
+       struct device_node *np, *npp;
+
+       np = find_devices("uni-n");
+       if (np) {
+               for (np = np->child; np != NULL; np = np->sibling)
+                       if (strncmp(np->name, "i2c", 3) == 0) {
+                               of_platform_device_create(np, "uni-n-i2c",
+                                                         NULL);
+                               break;
+                       }
+       }
+       np = find_devices("valkyrie");
+       if (np)
+               of_platform_device_create(np, "valkyrie", NULL);
+       np = find_devices("platinum");
+       if (np)
+               of_platform_device_create(np, "platinum", NULL);
+
+       npp = of_find_node_by_name(NULL, "u3");
+       if (npp) {
+               for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
+                       if (strncmp(np->name, "i2c", 3) == 0) {
+                               of_platform_device_create(np, "u3-i2c", NULL);
+                               of_node_put(np);
+                               break;
+                       }
+               }
+               of_node_put(npp);
+       }
+        np = of_find_node_by_type(NULL, "smu");
+        if (np) {
+               of_platform_device_create(np, "smu", NULL);
+               of_node_put(np);
+       }
+
+       return 0;
+}
+
+device_initcall(pmac_declare_of_platform_devices);
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init pmac_probe(int platform)
+{
+#ifdef CONFIG_PPC64
+       if (platform != PLATFORM_POWERMAC)
+               return 0;
+
+       /*
+        * On U3, the DART (iommu) must be allocated now since it
+        * has an impact on htab_initialize (due to the large page it
+        * occupies having to be broken up so the DART itself is not
+        * part of the cacheable linear mapping)
+        */
+       alloc_u3_dart_table();
+#endif
+
+#ifdef CONFIG_PMAC_SMU
+       /*
+        * SMU based G5s need some memory below 2Gb, at least the current
+        * driver needs that. We have to allocate it now. We allocate 4k
+        * (1 small page) for now.
+        */
+       smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
+#endif /* CONFIG_PMAC_SMU */
+
+       /* non-zero means "this platform handler accepts the machine" */
+       return 1;
+}
+
+#ifdef CONFIG_PPC64
+static int pmac_probe_mode(struct pci_bus *bus)
+{
+       struct device_node *node = bus->sysdata;
+
+       /* We need to use normal PCI probing for the AGP bus,
+          since the device for the AGP bridge isn't in the tree. */
+       if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
+               return PCI_PROBE_NORMAL;
+
+       return PCI_PROBE_DEVTREE;
+}
+#endif
+
+struct machdep_calls __initdata pmac_md = {
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
+       .cpu_die                = generic_mach_cpu_die,
+#endif
+       .probe                  = pmac_probe,
+       .setup_arch             = pmac_setup_arch,
+       .init_early             = pmac_init_early,
+       .show_cpuinfo           = pmac_show_cpuinfo,
+       .show_percpuinfo        = pmac_show_percpuinfo,
+       .init_IRQ               = pmac_pic_init,
+       .get_irq                = mpic_get_irq, /* changed later */
+       .pcibios_fixup          = pmac_pcibios_fixup,
+       .restart                = pmac_restart,
+       .power_off              = pmac_power_off,
+       .halt                   = pmac_halt,
+       .time_init              = pmac_time_init,
+       .get_boot_time          = pmac_get_boot_time,
+       .set_rtc_time           = pmac_set_rtc_time,
+       .get_rtc_time           = pmac_get_rtc_time,
+       .calibrate_decr         = pmac_calibrate_decr,
+       .feature_call           = pmac_do_feature_call,
+       .check_legacy_ioport    = pmac_check_legacy_ioport,
+       .progress               = pmac_progress,
+#ifdef CONFIG_PPC64
+       .pci_probe_mode         = pmac_probe_mode,
+       .idle_loop              = native_idle,
+       .enable_pmcs            = power4_enable_pmcs,
+#endif
+#ifdef CONFIG_PPC32
+       .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
+       .pcibios_after_init     = pmac_pcibios_after_init,
+       .phys_mem_access_prot   = pci_phys_mem_access_prot,
+#endif
+};
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
new file mode 100644 (file)
index 0000000..22b113d
--- /dev/null
@@ -0,0 +1,396 @@
+/*
+ * This file contains sleep low-level functions for PowerBook G3.
+ *    Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *    and Paul Mackerras (paulus@samba.org).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+#define MAGIC  0x4c617273      /* 'Lars' */
+
+/*
+ * Structure for storing CPU registers on the stack.
+ */
+#define SL_SP          0
+#define SL_PC          4
+#define SL_MSR         8
+#define SL_SDR1                0xc
+#define SL_SPRG0       0x10    /* 4 sprg's */
+#define SL_DBAT0       0x20
+#define SL_IBAT0       0x28
+#define SL_DBAT1       0x30
+#define SL_IBAT1       0x38
+#define SL_DBAT2       0x40
+#define SL_IBAT2       0x48
+#define SL_DBAT3       0x50
+#define SL_IBAT3       0x58
+#define SL_TB          0x60
+#define SL_R2          0x68
+#define SL_CR          0x6c
+#define SL_R12         0x70    /* r12 to r31 */
+#define SL_SIZE                (SL_R12 + 80)
+
+       .section .text
+       .align  5
+
+#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
+
+/* This gets called by via-pmu.c late during the sleep process.
+ * The PMU was already send the sleep command and will shut us down
+ * soon. We need to save all that is needed and setup the wakeup
+ * vector that will be called by the ROM on wakeup
+ */
+_GLOBAL(low_sleep_handler)
+#ifndef CONFIG_6xx
+       blr
+#else
+       mflr    r0
+       stw     r0,4(r1)
+       stwu    r1,-SL_SIZE(r1)
+       mfcr    r0
+       stw     r0,SL_CR(r1)
+       stw     r2,SL_R2(r1)
+       stmw    r12,SL_R12(r1)
+
+       /* Save MSR & SDR1 */
+       mfmsr   r4
+       stw     r4,SL_MSR(r1)
+       mfsdr1  r4
+       stw     r4,SL_SDR1(r1)
+
+       /* Get a stable timebase and save it */
+1:     mftbu   r4
+       stw     r4,SL_TB(r1)
+       mftb    r5
+       stw     r5,SL_TB+4(r1)
+       mftbu   r3
+       cmpw    r3,r4
+       bne     1b
+
+       /* Save SPRGs */
+       mfsprg  r4,0
+       stw     r4,SL_SPRG0(r1)
+       mfsprg  r4,1
+       stw     r4,SL_SPRG0+4(r1)
+       mfsprg  r4,2
+       stw     r4,SL_SPRG0+8(r1)
+       mfsprg  r4,3
+       stw     r4,SL_SPRG0+12(r1)
+
+       /* Save BATs */
+       mfdbatu r4,0
+       stw     r4,SL_DBAT0(r1)
+       mfdbatl r4,0
+       stw     r4,SL_DBAT0+4(r1)
+       mfdbatu r4,1
+       stw     r4,SL_DBAT1(r1)
+       mfdbatl r4,1
+       stw     r4,SL_DBAT1+4(r1)
+       mfdbatu r4,2
+       stw     r4,SL_DBAT2(r1)
+       mfdbatl r4,2
+       stw     r4,SL_DBAT2+4(r1)
+       mfdbatu r4,3
+       stw     r4,SL_DBAT3(r1)
+       mfdbatl r4,3
+       stw     r4,SL_DBAT3+4(r1)
+       mfibatu r4,0
+       stw     r4,SL_IBAT0(r1)
+       mfibatl r4,0
+       stw     r4,SL_IBAT0+4(r1)
+       mfibatu r4,1
+       stw     r4,SL_IBAT1(r1)
+       mfibatl r4,1
+       stw     r4,SL_IBAT1+4(r1)
+       mfibatu r4,2
+       stw     r4,SL_IBAT2(r1)
+       mfibatl r4,2
+       stw     r4,SL_IBAT2+4(r1)
+       mfibatu r4,3
+       stw     r4,SL_IBAT3(r1)
+       mfibatl r4,3
+       stw     r4,SL_IBAT3+4(r1)
+
+       /* Backup various CPU config stuffs */
+       bl      __save_cpu_setup
+
+       /* The ROM can wake us up via 2 different vectors:
+        *  - On wallstreet & lombard, we must write a magic
+        *    value 'Lars' at address 4 and a pointer to a
+        *    memory location containing the PC to resume from
+        *    at address 0.
+        *  - On Core99, we must store the wakeup vector at
+        *    address 0x80 and eventually it's parameters
+        *    at address 0x84. I've have some trouble with those
+        *    parameters however and I no longer use them.
+        */
+       lis     r5,grackle_wake_up@ha
+       addi    r5,r5,grackle_wake_up@l
+       tophys(r5,r5)
+       stw     r5,SL_PC(r1)
+       lis     r4,KERNELBASE@h
+       tophys(r5,r1)
+       addi    r5,r5,SL_PC
+       lis     r6,MAGIC@ha
+       addi    r6,r6,MAGIC@l
+       stw     r5,0(r4)
+       stw     r6,4(r4)
+       /* Setup stuffs at 0x80-0x84 for Core99 */
+       lis     r3,core99_wake_up@ha
+       addi    r3,r3,core99_wake_up@l
+       tophys(r3,r3)
+       stw     r3,0x80(r4)
+       stw     r5,0x84(r4)
+       /* Store a pointer to our backup storage into
+        * a kernel global
+        */
+       lis r3,sleep_storage@ha
+       addi r3,r3,sleep_storage@l
+       stw r5,0(r3)
+
+       .globl  low_cpu_die
+low_cpu_die:
+       /* Flush & disable all caches */
+       bl      flush_disable_caches
+
+       /* Turn off data relocation. */
+       mfmsr   r3              /* Save MSR in r7 */
+       rlwinm  r3,r3,0,28,26   /* Turn off DR bit */
+       sync
+       mtmsr   r3
+       isync
+
+BEGIN_FTR_SECTION
+       /* Flush any pending L2 data prefetches to work around HW bug */
+       sync
+       lis     r3,0xfff0
+       lwz     r0,0(r3)        /* perform cache-inhibited load to ROM */
+       sync                    /* (caches are disabled at this point) */
+END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
+
+/*
+ * Set the HID0 and MSR for sleep.
+ */
+       mfspr   r2,SPRN_HID0
+       rlwinm  r2,r2,0,10,7    /* clear doze, nap */
+       oris    r2,r2,HID0_SLEEP@h
+       sync
+       isync
+       mtspr   SPRN_HID0,r2
+       sync
+
+/* This loop puts us back to sleep in case we have a spurrious
+ * wakeup so that the host bridge properly stays asleep. The
+ * CPU will be turned off, either after a known time (about 1
+ * second) on wallstreet & lombard, or as soon as the CPU enters
+ * SLEEP mode on core99
+ */
+       mfmsr   r2
+       oris    r2,r2,MSR_POW@h
+1:     sync
+       mtmsr   r2
+       isync
+       b       1b
+
+/*
+ * Here is the resume code.
+ */
+
+
+/*
+ * Core99 machines resume here
+ * r4 has the physical address of SL_PC(sp) (unused)
+ */
+_GLOBAL(core99_wake_up)
+       /* Make sure HID0 no longer contains any sleep bit and that data cache
+        * is disabled
+        */
+       mfspr   r3,SPRN_HID0
+       rlwinm  r3,r3,0,11,7            /* clear SLEEP, NAP, DOZE bits */
+       rlwinm  r3,r3,0,18,15           /* clear DCE, ICE */
+       mtspr   SPRN_HID0,r3
+       sync
+       isync
+
+       /* sanitize MSR */
+       mfmsr   r3
+       ori     r3,r3,MSR_EE|MSR_IP
+       xori    r3,r3,MSR_EE|MSR_IP
+       sync
+       isync
+       mtmsr   r3
+       sync
+       isync
+
+       /* Recover sleep storage */
+       lis     r3,sleep_storage@ha
+       addi    r3,r3,sleep_storage@l
+       tophys(r3,r3)
+       lwz     r1,0(r3)
+
+       /* Pass thru to older resume code ... */
+/*
+ * Here is the resume code for older machines.
+ * r1 has the physical address of SL_PC(sp).
+ */
+
+grackle_wake_up:
+
+       /* Restore the kernel's segment registers before
+        * we do any r1 memory access as we are not sure they
+        * are in a sane state above the first 256Mb region
+        */
+       li      r0,16           /* load up segment register values */
+       mtctr   r0              /* for context 0 */
+       lis     r3,0x2000       /* Ku = 1, VSID = 0 */
+       li      r4,0
+3:     mtsrin  r3,r4
+       addi    r3,r3,0x111     /* increment VSID */
+       addis   r4,r4,0x1000    /* address of next segment */
+       bdnz    3b
+       sync
+       isync
+
+       subi    r1,r1,SL_PC
+
+       /* Restore various CPU config stuffs */
+       bl      __restore_cpu_setup
+
+       /* Make sure all FPRs have been initialized */
+       bl      reloc_offset
+       bl      __init_fpu_registers
+
+       /* Invalidate & enable L1 cache, we don't care about
+        * whatever the ROM may have tried to write to memory
+        */
+       bl      __inval_enable_L1
+
+       /* Restore the BATs, and SDR1.  Then we can turn on the MMU. */
+       lwz     r4,SL_SDR1(r1)
+       mtsdr1  r4
+       lwz     r4,SL_SPRG0(r1)
+       mtsprg  0,r4
+       lwz     r4,SL_SPRG0+4(r1)
+       mtsprg  1,r4
+       lwz     r4,SL_SPRG0+8(r1)
+       mtsprg  2,r4
+       lwz     r4,SL_SPRG0+12(r1)
+       mtsprg  3,r4
+
+       lwz     r4,SL_DBAT0(r1)
+       mtdbatu 0,r4
+       lwz     r4,SL_DBAT0+4(r1)
+       mtdbatl 0,r4
+       lwz     r4,SL_DBAT1(r1)
+       mtdbatu 1,r4
+       lwz     r4,SL_DBAT1+4(r1)
+       mtdbatl 1,r4
+       lwz     r4,SL_DBAT2(r1)
+       mtdbatu 2,r4
+       lwz     r4,SL_DBAT2+4(r1)
+       mtdbatl 2,r4
+       lwz     r4,SL_DBAT3(r1)
+       mtdbatu 3,r4
+       lwz     r4,SL_DBAT3+4(r1)
+       mtdbatl 3,r4
+       lwz     r4,SL_IBAT0(r1)
+       mtibatu 0,r4
+       lwz     r4,SL_IBAT0+4(r1)
+       mtibatl 0,r4
+       lwz     r4,SL_IBAT1(r1)
+       mtibatu 1,r4
+       lwz     r4,SL_IBAT1+4(r1)
+       mtibatl 1,r4
+       lwz     r4,SL_IBAT2(r1)
+       mtibatu 2,r4
+       lwz     r4,SL_IBAT2+4(r1)
+       mtibatl 2,r4
+       lwz     r4,SL_IBAT3(r1)
+       mtibatu 3,r4
+       lwz     r4,SL_IBAT3+4(r1)
+       mtibatl 3,r4
+
+BEGIN_FTR_SECTION
+       li      r4,0
+       mtspr   SPRN_DBAT4U,r4
+       mtspr   SPRN_DBAT4L,r4
+       mtspr   SPRN_DBAT5U,r4
+       mtspr   SPRN_DBAT5L,r4
+       mtspr   SPRN_DBAT6U,r4
+       mtspr   SPRN_DBAT6L,r4
+       mtspr   SPRN_DBAT7U,r4
+       mtspr   SPRN_DBAT7L,r4
+       mtspr   SPRN_IBAT4U,r4
+       mtspr   SPRN_IBAT4L,r4
+       mtspr   SPRN_IBAT5U,r4
+       mtspr   SPRN_IBAT5L,r4
+       mtspr   SPRN_IBAT6U,r4
+       mtspr   SPRN_IBAT6L,r4
+       mtspr   SPRN_IBAT7U,r4
+       mtspr   SPRN_IBAT7L,r4
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+
+       /* Flush all TLBs */
+       lis     r4,0x1000
+1:     addic.  r4,r4,-0x1000
+       tlbie   r4
+       blt     1b
+       sync
+
+       /* restore the MSR and turn on the MMU */
+       lwz     r3,SL_MSR(r1)
+       bl      turn_on_mmu
+
+       /* get back the stack pointer */
+       tovirt(r1,r1)
+
+       /* Restore TB */
+       li      r3,0
+       mttbl   r3
+       lwz     r3,SL_TB(r1)
+       lwz     r4,SL_TB+4(r1)
+       mttbu   r3
+       mttbl   r4
+
+       /* Restore the callee-saved registers and return */
+       lwz     r0,SL_CR(r1)
+       mtcr    r0
+       lwz     r2,SL_R2(r1)
+       lmw     r12,SL_R12(r1)
+       addi    r1,r1,SL_SIZE
+       lwz     r0,4(r1)
+       mtlr    r0
+       blr
+
+turn_on_mmu:
+       mflr    r4
+       tovirt(r4,r4)
+       mtsrr0  r4
+       mtsrr1  r3
+       sync
+       isync
+       rfi
+
+#endif /* CONFIG_6xx */
+
+       .section .data
+       .balign L1_CACHE_BYTES
+sleep_storage:
+       .long 0
+       .balign L1_CACHE_BYTES, 0
+
+#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) */
+       .section .text
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
new file mode 100644 (file)
index 0000000..e1f9443
--- /dev/null
@@ -0,0 +1,865 @@
+/*
+ * SMP support for power macintosh.
+ *
+ * We support both the old "powersurge" SMP architecture
+ * and the current Core99 (G4 PowerMac) machines.
+ *
+ * Note that we don't support the very first rev. of
+ * Apple/DayStar 2 CPUs board, the one with the funky
+ * watchdog. Hopefully, none of these should be there except
+ * maybe internally to Apple. I should probably still add some
+ * code to detect this card though and disable SMP. --BenH.
+ *
+ * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
+ * and Ben Herrenschmidt <benh@kernel.crashing.org>.
+ *
+ * Support for DayStar quad CPU cards
+ * Copyright (C) XLR8, Inc. 1994-2000
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/cpu.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/time.h>
+#include <asm/mpic.h>
+#include <asm/cacheflush.h>
+#include <asm/keylargo.h>
+#include <asm/pmac_low_i2c.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+extern void __secondary_start_pmac_0(void);
+
+#ifdef CONFIG_PPC32
+
+/* Sync flag for HW tb sync */
+static volatile int sec_tb_reset = 0;
+
+/*
+ * Powersurge (old powermac SMP) support.
+ */
+
+/* Addresses for powersurge registers */
+#define HAMMERHEAD_BASE                0xf8000000
+#define HHEAD_CONFIG           0x90
+#define HHEAD_SEC_INTR         0xc0
+
+/* register for interrupting the primary processor on the powersurge */
+/* N.B. this is actually the ethernet ROM! */
+#define PSURGE_PRI_INTR                0xf3019000
+
+/* register for storing the start address for the secondary processor */
+/* N.B. this is the PCI config space address register for the 1st bridge */
+#define PSURGE_START           0xf2800000
+
+/* Daystar/XLR8 4-CPU card */
+#define PSURGE_QUAD_REG_ADDR   0xf8800000
+
+#define PSURGE_QUAD_IRQ_SET    0
+#define PSURGE_QUAD_IRQ_CLR    1
+#define PSURGE_QUAD_IRQ_PRIMARY        2
+#define PSURGE_QUAD_CKSTOP_CTL 3
+#define PSURGE_QUAD_PRIMARY_ARB        4
+#define PSURGE_QUAD_BOARD_ID   6
+#define PSURGE_QUAD_WHICH_CPU  7
+#define PSURGE_QUAD_CKSTOP_RDBK        8
+#define PSURGE_QUAD_RESET_CTL  11
+
+#define PSURGE_QUAD_OUT(r, v)  (out_8(quad_base + ((r) << 4) + 4, (v)))
+#define PSURGE_QUAD_IN(r)      (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
+#define PSURGE_QUAD_BIS(r, v)  (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
+#define PSURGE_QUAD_BIC(r, v)  (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
+
+/* virtual addresses for the above */
+static volatile u8 __iomem *hhead_base;
+static volatile u8 __iomem *quad_base;
+static volatile u32 __iomem *psurge_pri_intr;
+static volatile u8 __iomem *psurge_sec_intr;
+static volatile u32 __iomem *psurge_start;
+
+/* values for psurge_type */
+#define PSURGE_NONE            -1
+#define PSURGE_DUAL            0
+#define PSURGE_QUAD_OKEE       1
+#define PSURGE_QUAD_COTTON     2
+#define PSURGE_QUAD_ICEGRASS   3
+
+/* what sort of powersurge board we have */
+static int psurge_type = PSURGE_NONE;
+
+/*
+ * Set and clear IPIs for powersurge.
+ */
+static inline void psurge_set_ipi(int cpu)
+{
+       if (psurge_type == PSURGE_NONE)
+               return;
+       /* The primary is interrupted via PSURGE_PRI_INTR -- presumably the
+        * read cycle itself raises the interrupt (see the note above: that
+        * address is actually the ethernet ROM).  Secondaries are poked via
+        * the hammerhead latch (dual) or the quad card's IRQ_SET register. */
+       if (cpu == 0)
+               in_be32(psurge_pri_intr);
+       else if (psurge_type == PSURGE_DUAL)
+               out_8(psurge_sec_intr, 0);
+       else
+               PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
+}
+
+/* Acknowledge/clear a pending IPI on a secondary; the primary (cpu 0)
+ * has nothing to clear here. */
+static inline void psurge_clr_ipi(int cpu)
+{
+       if (cpu > 0) {
+               switch(psurge_type) {
+               case PSURGE_DUAL:
+                       out_8(psurge_sec_intr, ~0);
+                       /* fall through */
+               case PSURGE_NONE:
+                       break;
+               default:
+                       PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
+               }
+       }
+}
+
+/*
+ * On powersurge (old SMP powermac architecture) we don't have
+ * separate IPIs for separate messages like openpic does.  Instead
+ * we have a bitmap for each processor, where a 1 bit means that
+ * the corresponding message is pending for that processor.
+ * Ideally each cpu's entry would be in a different cache line.
+ *  -- paulus.
+ */
+static unsigned long psurge_smp_message[NR_CPUS];
+
+/* Handle an incoming powersurge IPI: ack the hardware interrupt, then
+ * dispatch every message bit currently pending for this cpu. */
+void psurge_smp_message_recv(struct pt_regs *regs)
+{
+       int cpu = smp_processor_id();
+       int msg;
+
+       /* clear interrupt */
+       psurge_clr_ipi(cpu);
+
+       if (num_online_cpus() < 2)
+               return;
+
+       /* make sure there is a message there */
+       for (msg = 0; msg < 4; msg++)
+               if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
+                       smp_message_recv(msg, regs);
+}
+
+/* irq handler for the primary's IPI (installed on irq 30 in
+ * smp_psurge_setup_cpu via psurge_irqaction). */
+irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
+{
+       psurge_smp_message_recv(regs);
+       return IRQ_HANDLED;
+}
+
+/* Post message bit 'msg' in the per-cpu bitmap of each targeted online
+ * cpu and raise its IPI (see the comment above psurge_smp_message). */
+static void smp_psurge_message_pass(int target, int msg)
+{
+       int i;
+
+       if (num_online_cpus() < 2)
+               return;
+
+       for (i = 0; i < NR_CPUS; i++) {
+               if (!cpu_online(i))
+                       continue;
+               if (target == MSG_ALL
+                   || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
+                   || target == i) {
+                       set_bit(msg, &psurge_smp_message[i]);
+                       psurge_set_ipi(i);
+               }
+       }
+}
+
+/*
+ * Determine a quad card presence. We read the board ID register, we
+ * force the data bus to change to something else, and we read it again.
+ * If it's stable, then the register probably exists (ugh !)
+ */
+static int __init psurge_quad_probe(void)
+{
+       int type;
+       unsigned int i;
+
+       type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
+       if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
+           || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+               return PSURGE_DUAL;
+
+       /* looks OK, try a slightly more rigorous test */
+       /* bogus is not necessarily cacheline-aligned,
+          though I don't suppose that really matters.  -- paulus */
+       for (i = 0; i < 100; i++) {
+               volatile u32 bogus[8];
+               bogus[(0+i)%8] = 0x00000000;
+               bogus[(1+i)%8] = 0x55555555;
+               bogus[(2+i)%8] = 0xFFFFFFFF;
+               bogus[(3+i)%8] = 0xAAAAAAAA;
+               bogus[(4+i)%8] = 0x33333333;
+               bogus[(5+i)%8] = 0xCCCCCCCC;
+               bogus[(6+i)%8] = 0xCCCCCCCC;
+               bogus[(7+i)%8] = 0x33333333;
+               wmb();
+               /* flush the scribbled-on line so the patterns actually go
+                * out on the bus before we re-read the board ID */
+               asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
+               mb();
+               if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+                       return PSURGE_DUAL;
+       }
+       return type;
+}
+
+/* Bring the extra CPUs on a quad card out of clock-stop/reset.  The
+ * exact register sequence and the 33ms delays look like they come from
+ * the original XLR8 code (see copyright above) -- do not reorder. */
+static void __init psurge_quad_init(void)
+{
+       int procbits;
+
+       if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
+       /* WHICH_CPU reads as a mask of present CPUs, inverted */
+       procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
+       if (psurge_type == PSURGE_QUAD_ICEGRASS)
+               PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+       else
+               PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
+       mdelay(33);
+       out_8(psurge_sec_intr, ~0);
+       PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
+       PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+       if (psurge_type != PSURGE_QUAD_ICEGRASS)
+               PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
+       PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
+       mdelay(33);
+       PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
+       mdelay(33);
+       PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
+       mdelay(33);
+}
+
+/* Probe for a powersurge SMP board.  Returns the number of CPUs to
+ * bring up, or 1 when no usable second CPU is found. */
+static int __init smp_psurge_probe(void)
+{
+       int i, ncpus;
+
+       /* We don't do SMP on the PPC601 -- paulus */
+       if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+               return 1;
+
+       /*
+        * The powersurge cpu board can be used in the generation
+        * of powermacs that have a socket for an upgradeable cpu card,
+        * including the 7500, 8500, 9500, 9600.
+        * The device tree doesn't tell you if you have 2 cpus because
+        * OF doesn't know anything about the 2nd processor.
+        * Instead we look for magic bits in magic registers,
+        * in the hammerhead memory controller in the case of the
+        * dual-cpu powersurge board.  -- paulus.
+        */
+       if (find_devices("hammerhead") == NULL)
+               return 1;
+
+       /* Map the hammerhead memory controller and the (possible) quad
+        * card registers so we can poke at them below. */
+       hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
+       quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
+       psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
+
+       psurge_type = psurge_quad_probe();
+       if (psurge_type != PSURGE_DUAL) {
+               psurge_quad_init();
+               /* All released cards using this HW design have 4 CPUs */
+               ncpus = 4;
+       } else {
+               iounmap(quad_base);
+               if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
+                       /* not a dual-cpu card */
+                       iounmap(hhead_base);
+                       psurge_type = PSURGE_NONE;
+                       return 1;
+               }
+               ncpus = 2;
+       }
+
+       psurge_start = ioremap(PSURGE_START, 4);
+       psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
+
+       /* this is not actually strictly necessary -- paulus. */
+       for (i = 1; i < ncpus; ++i)
+               smp_hw_index[i] = i;
+
+       if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
+
+       return ncpus;
+}
+
+/* Start secondary cpu 'nr': publish its entry point through the
+ * PSURGE_START register, then pulse its IPI to release it. */
+static void __init smp_psurge_kick_cpu(int nr)
+{
+       unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
+       unsigned long a;
+
+       /* may need to flush here if secondary bats aren't setup */
+       for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
+               asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
+       asm volatile("sync");
+
+       if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+
+       out_be32(psurge_start, start);
+       mb();
+
+       /* a short IPI pulse is what actually wakes the secondary */
+       psurge_set_ipi(nr);
+       udelay(10);
+       psurge_clr_ipi(nr);
+
+       if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
+}
+
+/*
+ * With the dual-cpu powersurge board, the decrementers and timebases
+ * of both cpus are frozen after the secondary cpu is started up,
+ * until we give the secondary cpu another interrupt.  This routine
+ * uses this to get the timebases synchronized.
+ *  -- paulus.
+ */
+static void __init psurge_dual_sync_tb(int cpu_nr)
+{
+       int t;
+
+       /* reset our decrementer, TB and jiffy stamp to a known state */
+       set_dec(tb_ticks_per_jiffy);
+       set_tb(0, 0);
+       last_jiffy_stamp(cpu_nr) = 0;
+
+       if (cpu_nr > 0) {
+               /* secondary: flag that our TB is reset and go on; the
+                * primary will unfreeze both TBs with an IPI below */
+               mb();
+               sec_tb_reset = 1;
+               return;
+       }
+
+       /* wait for the secondary to have reset its TB before proceeding */
+       for (t = 10000000; t > 0 && !sec_tb_reset; --t)
+               ;
+
+       /* now interrupt the secondary, starting both TBs */
+       psurge_set_ipi(1);
+
+       smp_tb_synchronized = 1;
+}
+
+/* irqaction for the primary's IPI; hooked to irq 30 by
+ * smp_psurge_setup_cpu(). */
+static struct irqaction psurge_irqaction = {
+       .handler = psurge_primary_intr,
+       .flags = SA_INTERRUPT,
+       .mask = CPU_MASK_NONE,
+       .name = "primary IPI",
+};
+
+/* Per-cpu setup for powersurge boards.  On the primary, (re)arm the
+ * start vector, grab the IPI interrupt, and (on dual boards) run the
+ * timebase sync handshake. */
+static void __init smp_psurge_setup_cpu(int cpu_nr)
+{
+
+       if (cpu_nr == 0) {
+               /* If we failed to start the second CPU, we should still
+                * send it an IPI to start the timebase & DEC or we might
+                * have them stuck.
+                */
+               if (num_online_cpus() < 2) {
+                       if (psurge_type == PSURGE_DUAL)
+                               psurge_set_ipi(1);
+                       return;
+               }
+               /* reset the entry point so if we get another intr we won't
+                * try to startup again */
+               out_be32(psurge_start, 0x100);
+               if (setup_irq(30, &psurge_irqaction))
+                       printk(KERN_ERR "Couldn't get primary IPI interrupt\n");
+       }
+
+       if (psurge_type == PSURGE_DUAL)
+               psurge_dual_sync_tb(cpu_nr);
+}
+
+/* No per-cpu TB handoff on powersurge; the dual-board sync is done in
+ * psurge_dual_sync_tb() instead. */
+void __init smp_psurge_take_timebase(void)
+{
+       /* Dummy implementation */
+}
+
+/* Counterpart of smp_psurge_take_timebase(); intentionally empty. */
+void __init smp_psurge_give_timebase(void)
+{
+       /* Dummy implementation */
+}
+
+/* PowerSurge-style Macs */
+struct smp_ops_t psurge_smp_ops = {
+       .message_pass   = smp_psurge_message_pass,
+       .probe          = smp_psurge_probe,
+       .kick_cpu       = smp_psurge_kick_cpu,
+       .setup_cpu      = smp_psurge_setup_cpu,
+       .give_timebase  = smp_psurge_give_timebase,
+       .take_timebase  = smp_psurge_take_timebase,
+};
+#endif /* CONFIG_PPC32 - actually powersurge support */
+
+#ifdef CONFIG_PPC64
+/*
+ * G5s enable/disable the timebase via an i2c-connected clock chip.
+ */
+static struct device_node *pmac_tb_clock_chip_host;
+static u8 pmac_tb_pulsar_addr;
+static void (*pmac_tb_freeze)(int freeze);
+static DEFINE_SPINLOCK(timebase_lock);
+static unsigned long timebase;
+
+/* Freeze (freeze != 0) or restart the timebase clock through the
+ * Cypress clock chip: read register 0x81 over i2c, rewrite its two
+ * gating bits (0x0c mask), and write it back.  Panics on i2c failure
+ * since we can't recover a sane TB without it. */
+static void smp_core99_cypress_tb_freeze(int freeze)
+{
+       u8 data;
+       int rc;
+
+       /* Strangely, the device-tree says address is 0xd2, but darwin
+        * accesses 0xd0 ...
+        */
+       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
+       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
+                              0xd0 | pmac_low_i2c_read,
+                              0x81, &data, 1);
+       if (rc != 0)
+               goto bail;
+
+       data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
+
+       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
+       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
+                              0xd0 | pmac_low_i2c_write,
+                              0x81, &data, 1);
+
+ bail:
+       if (rc != 0) {
+               printk(KERN_ERR "Cypress Timebase %s rc: %d\n",
+                      freeze ? "freeze" : "unfreeze", rc);
+               panic("Timebase freeze failed !\n");
+       }
+}
+
+
+/* Freeze/restart the timebase through a Pulsar clock chip (i2c address
+ * in pmac_tb_pulsar_addr).  Panics on i2c failure, same as the Cypress
+ * variant above. */
+static void smp_core99_pulsar_tb_freeze(int freeze)
+{
+       u8 data;
+       int rc;
+
+       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
+       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
+                              pmac_tb_pulsar_addr | pmac_low_i2c_read,
+                              0x2e, &data, 1);
+       if (rc != 0)
+               goto bail;
+
+       /* keep the 0x88 bits of register 0x2e; 0x11 freezes, 0x22 runs */
+       data = (data & 0x88) | (freeze ? 0x11 : 0x22);
+
+       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
+       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
+                              pmac_tb_pulsar_addr | pmac_low_i2c_write,
+                              0x2e, &data, 1);
+ bail:
+       if (rc != 0) {
+               printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
+                      freeze ? "freeze" : "unfreeze", rc);
+               panic("Timebase freeze failed !\n");
+       }
+}
+
+
+/* Primary side of the G5 TB sync: freeze the TB via the i2c clock
+ * chip, publish its value in 'timebase', spin until the secondary has
+ * consumed it (cleared 'timebase'), then unfreeze. */
+static void smp_core99_give_timebase(void)
+{
+       /* Open i2c bus for synchronous access */
+       if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
+               panic("Can't open i2c for TB sync !\n");
+
+       spin_lock(&timebase_lock);
+       (*pmac_tb_freeze)(1);
+       mb();
+       timebase = get_tb();
+       spin_unlock(&timebase_lock);
+
+       /* wait for the secondary to zero 'timebase' in take_timebase */
+       while (timebase)
+               barrier();
+
+       spin_lock(&timebase_lock);
+       (*pmac_tb_freeze)(0);
+       spin_unlock(&timebase_lock);
+
+       /* Close i2c bus */
+       pmac_low_i2c_close(pmac_tb_clock_chip_host);
+}
+
+
+/* Secondary side of the G5 TB sync: wait for the primary to publish
+ * the frozen TB, load it, and clear 'timebase' to signal completion. */
+static void __devinit smp_core99_take_timebase(void)
+{
+       while (!timebase)
+               barrier();
+       spin_lock(&timebase_lock);
+       set_tb(timebase >> 32, timebase & 0xffffffff);
+       timebase = 0;
+       spin_unlock(&timebase_lock);
+}
+
+/* Locate the i2c clock chip used for the HW timebase freeze on the
+ * G5 machines that support it, and pick the matching freeze routine.
+ * Falls back to the generic software TB sync when no chip is found. */
+static void __init smp_core99_setup(int ncpus)
+{
+       struct device_node *cc = NULL;
+       struct device_node *p;
+       u32 *reg;
+       int ok;
+
+       /* HW sync only on these platforms */
+       if (!machine_is_compatible("PowerMac7,2") &&
+           !machine_is_compatible("PowerMac7,3") &&
+           !machine_is_compatible("RackMac3,1"))
+               return;
+
+       /* Look for the clock chip */
+       while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
+               p = of_get_parent(cc);
+               ok = p && device_is_compatible(p, "uni-n-i2c");
+               of_node_put(p);
+               if (!ok)
+                       continue;
+
+               reg = (u32 *)get_property(cc, "reg", NULL);
+               if (reg == NULL)
+                       continue;
+
+               /* the i2c address tells us which chip we're driving */
+               switch (*reg) {
+               case 0xd2:
+                       if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
+                               pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
+                               pmac_tb_pulsar_addr = 0xd2;
+                               printk(KERN_INFO "Timebase clock is Pulsar chip\n");
+                       } else if (device_is_compatible(cc, "cy28508")) {
+                               pmac_tb_freeze = smp_core99_cypress_tb_freeze;
+                               printk(KERN_INFO "Timebase clock is Cypress chip\n");
+                       }
+                       break;
+               case 0xd4:
+                       pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
+                       pmac_tb_pulsar_addr = 0xd4;
+                       printk(KERN_INFO "Timebase clock is Pulsar chip\n");
+                       break;
+               }
+               if (pmac_tb_freeze != NULL) {
+                       /* keep a reference on the chip's bus node */
+                       pmac_tb_clock_chip_host = of_get_parent(cc);
+                       of_node_put(cc);
+                       break;
+               }
+       }
+       if (pmac_tb_freeze == NULL) {
+               smp_ops->give_timebase = smp_generic_give_timebase;
+               smp_ops->take_timebase = smp_generic_take_timebase;
+       }
+}
+
+/* nothing to do here, caches are already set up by service processor */
+static inline void __devinit core99_init_caches(int cpu)
+{
+}
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * SMP G4 powermacs use a GPIO to enable/disable the timebase.
+ */
+
+static unsigned int core99_tb_gpio;    /* Timebase freeze GPIO */
+
+static unsigned int pri_tb_hi, pri_tb_lo;
+static unsigned int pri_tb_stamp;
+
+/* not __init, called in sleep/wakeup code */
+/* Primary side of the G4 TB sync: freeze the TB via the GPIO, stash
+ * TB and jiffy stamp for the secondary, and handshake through
+ * sec_tb_reset (1 = secondary ready, 2 = values published, 0 = done). */
+void smp_core99_give_timebase(void)
+{
+       unsigned long flags;
+       unsigned int t;
+
+       /* wait for the secondary to be in take_timebase */
+       for (t = 100000; t > 0 && !sec_tb_reset; --t)
+               udelay(10);
+       if (!sec_tb_reset) {
+               printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
+               return;
+       }
+
+       /* freeze the timebase and read it */
+       /* disable interrupts so the timebase is disabled for the
+          shortest possible time */
+       local_irq_save(flags);
+       pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
+       pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
+       mb();
+       pri_tb_hi = get_tbu();
+       pri_tb_lo = get_tbl();
+       pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
+       mb();
+
+       /* tell the secondary we're ready */
+       sec_tb_reset = 2;
+       mb();
+
+       /* wait for the secondary to have taken it */
+       for (t = 100000; t > 0 && sec_tb_reset; --t)
+               udelay(10);
+       if (sec_tb_reset)
+               printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
+       else
+               smp_tb_synchronized = 1;
+
+       /* Now, restart the timebase by leaving the GPIO to an open collector */
+       pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
+       pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
+       local_irq_restore(flags);
+}
+
+/* not __init, called in sleep/wakeup code */
+/* Secondary side of the G4 TB sync; see the handshake description on
+ * smp_core99_give_timebase(). */
+void smp_core99_take_timebase(void)
+{
+       unsigned long flags;
+
+       /* tell the primary we're here */
+       sec_tb_reset = 1;
+       mb();
+
+       /* wait for the primary to set pri_tb_hi/lo */
+       while (sec_tb_reset < 2)
+               mb();
+
+       /* set our stuff the same as the primary */
+       local_irq_save(flags);
+       set_dec(1);
+       set_tb(pri_tb_hi, pri_tb_lo);
+       last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
+       mb();
+
+       /* tell the primary we're done */
+       sec_tb_reset = 0;
+       mb();
+       local_irq_restore(flags);
+}
+
+/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
+volatile static long int core99_l2_cache;
+volatile static long int core99_l3_cache;
+
+/* Copy the primary's L2CR/L3CR settings to a freshly started secondary
+ * (cpu 0 only records its own values for later use). */
+static void __devinit core99_init_caches(int cpu)
+{
+       if (!cpu_has_feature(CPU_FTR_L2CR))
+               return;
+
+       if (cpu == 0) {
+               core99_l2_cache = _get_L2CR();
+               printk("CPU0: L2CR is %lx\n", core99_l2_cache);
+       } else {
+               printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
+               /* disable before reprogramming with the primary's value */
+               _set_L2CR(0);
+               _set_L2CR(core99_l2_cache);
+               printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
+       }
+
+       if (!cpu_has_feature(CPU_FTR_L3CR))
+               return;
+
+       if (cpu == 0) {
+               core99_l3_cache = _get_L3CR();
+               printk("CPU0: L3CR is %lx\n", core99_l3_cache);
+       } else {
+               printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
+               _set_L3CR(0);
+               _set_L3CR(core99_l3_cache);
+               printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
+       }
+}
+
+/* G4 variant: pick the timebase-enable GPIO from the device tree (or
+ * use the default) and number the secondary CPUs. */
+static void __init smp_core99_setup(int ncpus)
+{
+       struct device_node *cpu;
+       u32 *tbprop = NULL;
+       int i;
+
+       core99_tb_gpio = KL_GPIO_TB_ENABLE;     /* default value */
+       cpu = of_find_node_by_type(NULL, "cpu");
+       if (cpu != NULL) {
+               tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
+               if (tbprop)
+                       core99_tb_gpio = *tbprop;
+               of_node_put(cpu);
+       }
+
+       /* XXX should get this from reg properties */
+       for (i = 1; i < ncpus; ++i)
+               smp_hw_index[i] = i;
+       /* NOTE(review): nap is disabled here on SMP -- presumably it
+        * interferes with the GPIO timebase scheme; confirm before
+        * re-enabling. */
+       powersave_nap = 0;
+}
+#endif
+
+/* Count the CPUs in the device tree; when there is more than one, do
+ * the core99 platform setup and return the count (1 means UP). */
+static int __init smp_core99_probe(void)
+{
+       struct device_node *cpus;
+       int ncpus = 0;
+
+       if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
+
+       /* Count CPUs in the device-tree */
+       for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
+               ++ncpus;
+
+       printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
+
+       /* Nothing more to do if less than 2 of them */
+       if (ncpus <= 1)
+               return 1;
+
+       smp_core99_setup(ncpus);
+       mpic_request_ipis();
+       core99_init_caches(0);
+
+       return ncpus;
+}
+
+/* Start secondary cpu 'nr' by temporarily patching the system reset
+ * vector at 0x100 with a branch to its entry stub, then resetting it
+ * via the platform feature call. */
+static void __devinit smp_core99_kick_cpu(int nr)
+{
+       unsigned int save_vector;
+       unsigned long new_vector;
+       unsigned long flags;
+       volatile unsigned int *vector
+                = ((volatile unsigned int *)(KERNELBASE+0x100));
+
+       if (nr < 0 || nr > 3)
+               return;
+       if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
+
+       /* local_irq_save() already disables interrupts, no need for the
+        * extra local_irq_disable() that used to follow it */
+       local_irq_save(flags);
+
+       /* Save reset vector */
+       save_vector = *vector;
+
+       /* Setup fake reset vector that does
+        *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
+        */
+       new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
+       /* 0x48000002 is an unconditional absolute branch (AA=1) opcode */
+       *vector = 0x48000002 + new_vector - KERNELBASE;
+
+       /* flush data cache and inval instruction cache */
+       flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+       /* Put some life in our friend */
+       pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
+
+       /* FIXME: We wait a bit for the CPU to take the exception, I should
+        * instead wait for the entry code to set something for me. Well,
+        * ideally, all that crap will be done in prom.c and the CPU left
+        * in a RAM-based wait loop like CHRP.
+        */
+       mdelay(1);
+
+       /* Restore our exception vector */
+       *vector = save_vector;
+       flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+       local_irq_restore(flags);
+       if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
+}
+
+/* Per-cpu setup for core99: program L2/L3 on secondaries, set up the
+ * MPIC for everyone, and on the final call (cpu 0) take a CPU1 that
+ * never came up off the bus on G5s. */
+static void __devinit smp_core99_setup_cpu(int cpu_nr)
+{
+       /* Setup L2/L3 */
+       if (cpu_nr != 0)
+               core99_init_caches(cpu_nr);
+
+       /* Setup openpic */
+       mpic_setup_this_cpu();
+
+       if (cpu_nr == 0) {
+#ifdef CONFIG_POWER4
+               extern void g5_phy_disable_cpu1(void);
+
+               /* If we didn't start the second CPU, we must take
+                * it off the bus
+                */
+               if (machine_is_compatible("MacRISC4") &&
+                   num_online_cpus() < 2)
+                       g5_phy_disable_cpu1();
+#endif /* CONFIG_POWER4 */
+               if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+       }
+}
+
+
+/* Core99 Macs (dual G4s and G5s) */
+struct smp_ops_t core99_smp_ops = {
+       .message_pass   = smp_mpic_message_pass,
+       .probe          = smp_core99_probe,
+       .kick_cpu       = smp_core99_kick_cpu,
+       .setup_cpu      = smp_core99_setup_cpu,
+       .give_timebase  = smp_core99_give_timebase,
+       .take_timebase  = smp_core99_take_timebase,
+};
+
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+
+/* Take the current cpu out of the online map and quiesce it: raise its
+ * MPIC task priority to mask interrupts and park the decrementer far
+ * in the future.  Always succeeds. */
+int __cpu_disable(void)
+{
+       cpu_clear(smp_processor_id(), cpu_online_map);
+
+       /* XXX reset cpu affinity here */
+       mpic_cpu_set_priority(0xf);
+       /* written twice with a delay -- presumably so a decrementer
+        * exception pending across the first write can't fire; confirm */
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       mb();
+       udelay(20);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       return 0;
+}
+
+extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
+static int cpu_dead[NR_CPUS];
+
+/* Runs on the dying cpu itself: flag ourselves dead for __cpu_die()
+ * and drop into the low-level sleep code (does not return). */
+void cpu_die(void)
+{
+       local_irq_disable();
+       cpu_dead[smp_processor_id()] = 1;
+       mb();
+       low_cpu_die();
+}
+
+/* Runs on a surviving cpu: wait up to ~1s for 'cpu' to flag itself
+ * dead in cpu_die(), then recycle its callin/dead state so it can be
+ * brought back later. */
+void __cpu_die(unsigned int cpu)
+{
+       int timeout;
+
+       timeout = 1000;
+       while (!cpu_dead[cpu]) {
+               if (--timeout == 0) {
+                       printk("CPU %u refused to die!\n", cpu);
+                       break;
+               }
+               msleep(1);
+       }
+       cpu_callin_map[cpu] = 0;
+       cpu_dead[cpu] = 0;
+}
+
+#endif
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
new file mode 100644 (file)
index 0000000..82982bf
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Support for periodic interrupts (100 per second) and for getting
+ * the current time from the RTC on Power Macintoshes.
+ *
+ * We use the decrementer register for our periodic interrupts.
+ *
+ * Paul Mackerras      August 1996.
+ * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
+ *
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+#include <linux/rtc.h>
+
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/nvram.h>
+#include <asm/smu.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* Apparently the RTC stores seconds since 1 Jan 1904 */
+#define RTC_OFFSET     2082844800
+
+/*
+ * Calibrate the decrementer frequency with the VIA timer 1.
+ */
+#define VIA_TIMER_FREQ_6       4700000 /* timer 1 frequency * 6 */
+
+/* VIA registers */
+#define RS             0x200           /* skip between registers */
+#define T1CL           (4*RS)          /* Timer 1 ctr/latch (low 8 bits) */
+#define T1CH           (5*RS)          /* Timer 1 counter (high 8 bits) */
+#define T1LL           (6*RS)          /* Timer 1 latch (low 8 bits) */
+#define T1LH           (7*RS)          /* Timer 1 latch (high 8 bits) */
+#define ACR            (11*RS)         /* Auxiliary control register */
+#define IFR            (13*RS)         /* Interrupt flag register */
+
+/* Bits in ACR */
+#define T1MODE         0xc0            /* Timer 1 mode */
+#define T1MODE_CONT    0x40            /*  continuous interrupts */
+
+/* Bits in IFR and IER */
+#define T1_INT         0x40            /* Timer 1 interrupt */
+
+long __init pmac_time_init(void)
+{
+       s32 delta = 0;
+#ifdef CONFIG_NVRAM
+       int dst;
+
+       delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
+       delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
+       delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
+       if (delta & 0x00800000UL)
+               delta |= 0xFF000000UL;
+       dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
+       printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
+               dst ? "on" : "off");
+#endif
+       return delta;
+}
+
+static void to_rtc_time(unsigned long now, struct rtc_time *tm)
+{
+       to_tm(now, tm);
+       tm->tm_year -= 1900;
+       tm->tm_mon -= 1;
+}
+
+static unsigned long from_rtc_time(struct rtc_time *tm)
+{
+       return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
+                     tm->tm_hour, tm->tm_min, tm->tm_sec);
+}
+
+#ifdef CONFIG_ADB_CUDA
+static unsigned long cuda_get_time(void)
+{
+       struct adb_request req;
+       unsigned long now;
+
+       if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+               return 0;
+       while (!req.complete)
+               cuda_poll();
+       if (req.reply_len != 7)
+               printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
+                      req.reply_len);
+       now = (req.reply[3] << 24) + (req.reply[4] << 16)
+               + (req.reply[5] << 8) + req.reply[6];
+       if (now < RTC_OFFSET)
+               return 0;
+       return now - RTC_OFFSET;
+}
+
+#define cuda_get_rtc_time(tm)  to_rtc_time(cuda_get_time(), (tm))
+
+static int cuda_set_rtc_time(struct rtc_time *tm)
+{
+       unsigned int nowtime;
+       struct adb_request req;
+
+       nowtime = from_rtc_time(tm) + RTC_OFFSET;
+       if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+                        nowtime >> 24, nowtime >> 16, nowtime >> 8,
+                        nowtime) < 0)
+               return -ENXIO;
+       while (!req.complete)
+               cuda_poll();
+       if ((req.reply_len != 3) && (req.reply_len != 7))
+               printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
+                      req.reply_len);
+       return 0;
+}
+
+#else
+#define cuda_get_time()                0
+#define cuda_get_rtc_time(tm)
+#define cuda_set_rtc_time(tm)  0
+#endif
+
+#ifdef CONFIG_ADB_PMU
+static unsigned long pmu_get_time(void)
+{
+       struct adb_request req;
+       unsigned long now;
+
+       if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+               return 0;
+       pmu_wait_complete(&req);
+       if (req.reply_len != 4)
+               printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
+                      req.reply_len);
+       now = (req.reply[0] << 24) + (req.reply[1] << 16)
+               + (req.reply[2] << 8) + req.reply[3];
+       if (now < RTC_OFFSET)
+               return 0;
+       return now - RTC_OFFSET;
+}
+
+#define pmu_get_rtc_time(tm)   to_rtc_time(pmu_get_time(), (tm))
+
+static int pmu_set_rtc_time(struct rtc_time *tm)
+{
+       unsigned int nowtime;
+       struct adb_request req;
+
+       nowtime = from_rtc_time(tm) + RTC_OFFSET;
+       if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
+                       nowtime >> 16, nowtime >> 8, nowtime) < 0)
+               return -ENXIO;
+       pmu_wait_complete(&req);
+       if (req.reply_len != 0)
+               printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
+                      req.reply_len);
+       return 0;
+}
+
+#else
+#define pmu_get_time()         0
+#define pmu_get_rtc_time(tm)
+#define pmu_set_rtc_time(tm)   0
+#endif
+
+#ifdef CONFIG_PMAC_SMU
+static unsigned long smu_get_time(void)
+{
+       struct rtc_time tm;
+
+       if (smu_get_rtc_time(&tm, 1))
+               return 0;
+       return from_rtc_time(&tm);
+}
+
+#else
+#define smu_get_time()                 0
+#define smu_get_rtc_time(tm, spin)
+#define smu_set_rtc_time(tm, spin)     0
+#endif
+
+unsigned long pmac_get_boot_time(void)
+{
+       /* Get the time from the RTC, used only at boot time */
+       switch (sys_ctrler) {
+       case SYS_CTRLER_CUDA:
+               return cuda_get_time();
+       case SYS_CTRLER_PMU:
+               return pmu_get_time();
+       case SYS_CTRLER_SMU:
+               return smu_get_time();
+       default:
+               return 0;
+       }
+}
+
+void pmac_get_rtc_time(struct rtc_time *tm)
+{
+       /* Get the time from the RTC, used only at boot time */
+       switch (sys_ctrler) {
+       case SYS_CTRLER_CUDA:
+               cuda_get_rtc_time(tm);
+               break;
+       case SYS_CTRLER_PMU:
+               pmu_get_rtc_time(tm);
+               break;
+       case SYS_CTRLER_SMU:
+               smu_get_rtc_time(tm, 1);
+               break;
+       default:
+               ;
+       }
+}
+
+int pmac_set_rtc_time(struct rtc_time *tm)
+{
+       switch (sys_ctrler) {
+       case SYS_CTRLER_CUDA:
+               return cuda_set_rtc_time(tm);
+       case SYS_CTRLER_PMU:
+               return pmu_set_rtc_time(tm);
+       case SYS_CTRLER_SMU:
+               return smu_set_rtc_time(tm, 1);
+       default:
+               return -ENODEV;
+       }
+}
+
+#ifdef CONFIG_PPC32
+/*
+ * Calibrate the decrementer register using VIA timer 1.
+ * This is used both on powermacs and CHRP machines.
+ */
+int __init via_calibrate_decr(void)
+{
+       struct device_node *vias;
+       volatile unsigned char __iomem *via;
+       int count = VIA_TIMER_FREQ_6 / 100;
+       unsigned int dstart, dend;
+
+       vias = find_devices("via-cuda");
+       if (vias == 0)
+               vias = find_devices("via-pmu");
+       if (vias == 0)
+               vias = find_devices("via");
+       if (vias == 0 || vias->n_addrs == 0)
+               return 0;
+       via = ioremap(vias->addrs[0].address, vias->addrs[0].size);
+
+       /* set timer 1 for continuous interrupts */
+       out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
+       /* set the counter to a small value */
+       out_8(&via[T1CH], 2);
+       /* set the latch to `count' */
+       out_8(&via[T1LL], count);
+       out_8(&via[T1LH], count >> 8);
+       /* wait until it hits 0 */
+       while ((in_8(&via[IFR]) & T1_INT) == 0)
+               ;
+       dstart = get_dec();
+       /* clear the interrupt & wait until it hits 0 again */
+       in_8(&via[T1CL]);
+       while ((in_8(&via[IFR]) & T1_INT) == 0)
+               ;
+       dend = get_dec();
+
+       ppc_tb_freq = (dstart - dend) * 100 / 6;
+
+       iounmap(via);
+
+       return 1;
+}
+#endif
+
+#ifdef CONFIG_PM
+/*
+ * Reset the time after a sleep.
+ */
+static int
+time_sleep_notify(struct pmu_sleep_notifier *self, int when)
+{
+       static unsigned long time_diff;
+       unsigned long flags;
+       unsigned long seq;
+       struct timespec tv;
+
+       switch (when) {
+       case PBOOK_SLEEP_NOW:
+               do {
+                       seq = read_seqbegin_irqsave(&xtime_lock, flags);
+                       time_diff = xtime.tv_sec - pmac_get_boot_time();
+               } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+               break;
+       case PBOOK_WAKE:
+               tv.tv_sec = pmac_get_boot_time() + time_diff;
+               tv.tv_nsec = 0;
+               do_settimeofday(&tv);
+               break;
+       }
+       return PBOOK_SLEEP_OK;
+}
+
+static struct pmu_sleep_notifier time_sleep_notifier = {
+       time_sleep_notify, SLEEP_LEVEL_MISC,
+};
+#endif /* CONFIG_PM */
+
+/*
+ * Query the OF and get the decr frequency.
+ */
+void __init pmac_calibrate_decr(void)
+{
+#ifdef CONFIG_PM
+       /* XXX why here? */
+       pmu_register_sleep_notifier(&time_sleep_notifier);
+#endif /* CONFIG_PM */
+
+       generic_calibrate_decr();
+
+#ifdef CONFIG_PPC32
+       /* We assume MacRISC2 machines have correct device-tree
+        * calibration. That's better since the VIA itself seems
+        * to be slightly off. --BenH
+        */
+       if (!machine_is_compatible("MacRISC2") &&
+           !machine_is_compatible("MacRISC3") &&
+           !machine_is_compatible("MacRISC4"))
+               if (via_calibrate_decr())
+                       return;
+
+       /* Special case: QuickSilver G4s seem to have a badly calibrated
+        * timebase-frequency in OF, VIA is much better on these. We should
+        * probably implement calibration based on the KL timer on these
+        * machines anyway... -BenH
+        */
+       if (machine_is_compatible("PowerMac3,5"))
+               if (via_calibrate_decr())
+                       return;
+#endif
+}
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
new file mode 100644 (file)
index 0000000..673ac47
--- /dev/null
@@ -0,0 +1,22 @@
+
+config PREP_RESIDUAL
+       bool "Support for PReP Residual Data"
+       depends on PPC_PREP
+       help
+         Some PReP systems have residual data passed to the kernel by the
+         firmware.  This allows detection of memory size, devices present and
+         other useful pieces of information.  Sometimes this information is
+         not present or incorrect, in which case it could lead to the machine
+         behaving incorrectly.  If this happens, either disable PREP_RESIDUAL
+         or pass the 'noresidual' option to the kernel.
+
+         If you are running a PReP system, say Y here, otherwise say N.
+
+config PROC_PREPRESIDUAL
+       bool "Support for reading of PReP Residual Data in /proc"
+       depends on PREP_RESIDUAL && PROC_FS
+       help
+         Enabling this option will create a /proc/residual file which allows
+         you to get at the residual data on PReP systems.  You will need a tool
+         (lsresidual) to parse it.  If you aren't on a PReP system, you don't
+         want this.
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
new file mode 100644 (file)
index 0000000..7a3b6fc
--- /dev/null
@@ -0,0 +1,47 @@
+
+config PPC_SPLPAR
+       depends on PPC_PSERIES
+       bool "Support for shared-processor logical partitions"
+       default n
+       help
+         Enabling this option will make the kernel run more efficiently
+         on logically-partitioned pSeries systems which use shared
+         processors, that is, which share physical processors between
+         two or more partitions.
+
+config HMT
+       bool "Hardware multithreading"
+       depends on SMP && PPC_PSERIES && BROKEN
+       help
+         This option enables hardware multithreading on RS64 cpus.
+         pSeries systems p620 and p660 have such a cpu type.
+
+config EEH
+       bool "PCI Extended Error Handling (EEH)" if EMBEDDED
+       depends on PPC_PSERIES
+       default y if !EMBEDDED
+
+config PPC_RTAS
+       bool
+       depends on PPC_PSERIES || PPC_BPA
+       default y
+
+config RTAS_PROC
+       bool "Proc interface to RTAS"
+       depends on PPC_RTAS
+       default y
+
+config RTAS_FLASH
+       tristate "Firmware flash interface"
+       depends on PPC64 && RTAS_PROC
+
+config SCANLOG
+       tristate "Scanlog dump interface"
+       depends on RTAS_PROC && PPC_PSERIES
+
+config LPARCFG
+       tristate "LPAR Configuration Data"
+       depends on PPC_PSERIES || PPC_ISERIES
+       help
+         Provide system capacity information via human readable
+         <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
new file mode 100644 (file)
index 0000000..9ebb341
--- /dev/null
@@ -0,0 +1,4 @@
+obj-y                  := pci.o lpar.o hvCall.o nvram.o reconfig.o \
+                          setup.o iommu.o
+obj-$(CONFIG_SMP)      += smp.o
+obj-$(CONFIG_IBMVIO)   += vio.o
similarity index 96%
rename from arch/ppc64/kernel/pSeries_iommu.c
rename to arch/powerpc/platforms/pseries/iommu.c
index d17f010..9e90d41 100644 (file)
@@ -46,7 +46,8 @@
 #include <asm/pSeries_reconfig.h>
 #include <asm/systemcfg.h>
 #include <asm/firmware.h>
-#include "pci.h"
+#include <asm/tce.h>
+#include <asm/ppc-pci.h>
 
 #define DBG(fmt...)
 
@@ -59,6 +60,9 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
        union tce_entry t;
        union tce_entry *tp;
 
+       index <<= TCE_PAGE_FACTOR;
+       npages <<= TCE_PAGE_FACTOR;
+
        t.te_word = 0;
        t.te_rdwr = 1; // Read allowed 
 
@@ -69,11 +73,11 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
 
        while (npages--) {
                /* can't move this out since we might cross LMB boundary */
-               t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+               t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        
                tp->te_word = t.te_word;
 
-               uaddr += PAGE_SIZE;
+               uaddr += TCE_PAGE_SIZE;
                tp++;
        }
 }
@@ -84,6 +88,9 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
        union tce_entry t;
        union tce_entry *tp;
 
+       npages <<= TCE_PAGE_FACTOR;
+       index <<= TCE_PAGE_FACTOR;
+
        t.te_word = 0;
        tp  = ((union tce_entry *)tbl->it_base) + index;
                
@@ -103,7 +110,7 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
        union tce_entry tce;
 
        tce.te_word = 0;
-       tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+       tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        tce.te_rdwr = 1;
        if (direction != DMA_TO_DEVICE)
                tce.te_pciwr = 1;
@@ -136,6 +143,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
        union tce_entry tce, *tcep;
        long l, limit;
 
+       tcenum <<= TCE_PAGE_FACTOR;
+       npages <<= TCE_PAGE_FACTOR;
+
        if (npages == 1)
                return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                           direction);
@@ -155,7 +165,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
        }
 
        tce.te_word = 0;
-       tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+       tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        tce.te_rdwr = 1;
        if (direction != DMA_TO_DEVICE)
                tce.te_pciwr = 1;
@@ -166,7 +176,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                 * Set up the page with TCE data, looping through and setting
                 * the values.
                 */
-               limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry));
+               limit = min_t(long, npages, 4096/sizeof(union tce_entry));
 
                for (l = 0; l < limit; l++) {
                        tcep[l] = tce;
@@ -196,6 +206,9 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
        u64 rc;
        union tce_entry tce;
 
+       tcenum <<= TCE_PAGE_FACTOR;
+       npages <<= TCE_PAGE_FACTOR;
+
        tce.te_word = 0;
 
        while (npages--) {
@@ -221,6 +234,9 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
        u64 rc;
        union tce_entry tce;
 
+       tcenum <<= TCE_PAGE_FACTOR;
+       npages <<= TCE_PAGE_FACTOR;
+
        tce.te_word = 0;
 
        rc = plpar_tce_stuff((u64)tbl->it_index,
similarity index 98%
rename from arch/ppc64/kernel/pSeries_lpar.c
rename to arch/powerpc/platforms/pseries/lpar.c
index a6de83f..268d836 100644 (file)
@@ -486,8 +486,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
-                                  int local)
+void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
        int i;
        unsigned long flags = 0;
@@ -498,7 +497,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
                spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
        for (i = 0; i < number; i++)
-               flush_hash_page(context, batch->addr[i], batch->pte[i], local);
+               flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 
        if (lock_tlbie)
                spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
similarity index 99%
rename from arch/ppc64/kernel/pSeries_pci.c
rename to arch/powerpc/platforms/pseries/pci.c
index 928f8fe..c198656 100644 (file)
@@ -29,8 +29,7 @@
 
 #include <asm/pci-bridge.h>
 #include <asm/prom.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 static int __devinitdata s7a_workaround = -1;
 
similarity index 97%
rename from arch/ppc64/kernel/pSeries_setup.c
rename to arch/powerpc/platforms/pseries/setup.c
index 3009701..92d1800 100644 (file)
 #include <asm/xics.h>
 #include <asm/firmware.h>
 #include <asm/pmc.h>
-
-#include "i8259.h"
-#include "mpic.h"
-#include "pci.h"
+#include <asm/mpic.h>
+#include <asm/ppc-pci.h>
+#include <asm/i8259.h>
+#include <asm/udbg.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -84,13 +84,13 @@ int fwnmi_active;  /* TRUE if an FWNMI handler is present */
 extern void pSeries_system_reset_exception(struct pt_regs *regs);
 extern int pSeries_machine_check_exception(struct pt_regs *regs);
 
-static int pseries_shared_idle(void);
-static int pseries_dedicated_idle(void);
+static void pseries_shared_idle(void);
+static void pseries_dedicated_idle(void);
 
 static volatile void __iomem * chrp_int_ack_special;
 struct mpic *pSeries_mpic;
 
-void pSeries_get_cpuinfo(struct seq_file *m)
+void pSeries_show_cpuinfo(struct seq_file *m)
 {
        struct device_node *root;
        const char *model = "";
@@ -124,7 +124,7 @@ static int pSeries_irq_cascade(struct pt_regs *regs, void *data)
        if (chrp_int_ack_special)
                return readb(chrp_int_ack_special);
        else
-               return i8259_irq(smp_processor_id());
+               return i8259_irq(regs);
 }
 
 static void __init pSeries_init_mpic(void)
@@ -241,10 +241,6 @@ static void __init pSeries_setup_arch(void)
        find_and_init_phbs();
        eeh_init();
 
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp = &dummy_con;
-#endif
-
        pSeries_nvram_init();
 
        /* Choose an idle loop */
@@ -488,8 +484,8 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
        }
 }
 
-static int pseries_dedicated_idle(void)
-{
+static void pseries_dedicated_idle(void)
+{
        long oldval;
        struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
@@ -544,7 +540,7 @@ static int pseries_dedicated_idle(void)
        }
 }
 
-static int pseries_shared_idle(void)
+static void pseries_shared_idle(void)
 {
        struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
@@ -586,8 +582,6 @@ static int pseries_shared_idle(void)
                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
-
-       return 0;
 }
 
 static int pSeries_pci_probe_mode(struct pci_bus *bus)
@@ -601,7 +595,7 @@ struct machdep_calls __initdata pSeries_md = {
        .probe                  = pSeries_probe,
        .setup_arch             = pSeries_setup_arch,
        .init_early             = pSeries_init_early,
-       .get_cpuinfo            = pSeries_get_cpuinfo,
+       .show_cpuinfo           = pSeries_show_cpuinfo,
        .log_error              = pSeries_log_error,
        .pcibios_fixup          = pSeries_final_fixup,
        .pci_probe_mode         = pSeries_pci_probe_mode,
similarity index 92%
rename from arch/ppc64/kernel/pSeries_smp.c
rename to arch/powerpc/platforms/pseries/smp.c
index d2c7e2c..ae1bd27 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * SMP support for pSeries and BPA machines.
+ * SMP support for pSeries machines.
  *
  * Dave Engebretsen, Peter Bergner, and
  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
@@ -46,9 +46,7 @@
 #include <asm/rtas.h>
 #include <asm/plpar_wrappers.h>
 #include <asm/pSeries_reconfig.h>
-
-#include "mpic.h"
-#include "bpa_iic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -343,36 +341,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
 
 }
 #endif /* CONFIG_XICS */
-#ifdef CONFIG_BPA_IIC
-static void smp_iic_message_pass(int target, int msg)
-{
-       unsigned int i;
-
-       if (target < NR_CPUS) {
-               iic_cause_IPI(target, msg);
-       } else {
-               for_each_online_cpu(i) {
-                       if (target == MSG_ALL_BUT_SELF
-                           && i == smp_processor_id())
-                               continue;
-                       iic_cause_IPI(i, msg);
-               }
-       }
-}
-
-static int __init smp_iic_probe(void)
-{
-       iic_request_IPIs();
-
-       return cpus_weight(cpu_possible_map);
-}
-
-static void __devinit smp_iic_setup_cpu(int cpu)
-{
-       if (cpu != boot_cpuid)
-               iic_setup_cpu();
-}
-#endif /* CONFIG_BPA_IIC */
 
 static DEFINE_SPINLOCK(timebase_lock);
 static unsigned long timebase = 0;
@@ -444,15 +412,6 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
        .cpu_bootable   = smp_pSeries_cpu_bootable,
 };
 #endif
-#ifdef CONFIG_BPA_IIC
-static struct smp_ops_t bpa_iic_smp_ops = {
-       .message_pass   = smp_iic_message_pass,
-       .probe          = smp_iic_probe,
-       .kick_cpu       = smp_pSeries_kick_cpu,
-       .setup_cpu      = smp_iic_setup_cpu,
-       .cpu_bootable   = smp_pSeries_cpu_bootable,
-};
-#endif
 
 /* This is called very early */
 void __init smp_init_pSeries(void)
@@ -472,11 +431,6 @@ void __init smp_init_pSeries(void)
                smp_ops = &pSeries_xics_smp_ops;
                break;
 #endif
-#ifdef CONFIG_BPA_IIC
-       case IC_BPA_IIC:
-               smp_ops = &bpa_iic_smp_ops;
-               break;
-#endif
        default:
                panic("Invalid interrupt controller");
        }
similarity index 99%
rename from arch/ppc64/kernel/pSeries_vio.c
rename to arch/powerpc/platforms/pseries/vio.c
index e0ae06f..866379b 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/prom.h>
 #include <asm/vio.h>
 #include <asm/hvcall.h>
+#include <asm/tce.h>
 
 extern struct subsystem devices_subsys; /* needed for vio_find_name() */
 
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
new file mode 100644 (file)
index 0000000..c649f03
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MPIC)             += mpic.o
+indirectpci-$(CONFIG_PPC_PMAC) = indirect_pci.o
+obj-$(CONFIG_PPC32)            += $(indirectpci-y)
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
new file mode 100644 (file)
index 0000000..e714884
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Support for indirect PCI bridges.
+ *
+ * Copyright (C) 1998 Gabriel Paubert.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+
+#ifdef CONFIG_PPC_INDIRECT_PCI_BE
+#define PCI_CFG_OUT out_be32
+#else
+#define PCI_CFG_OUT out_le32
+#endif
+
+static int
+indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+                    int len, u32 *val)
+{
+       struct pci_controller *hose = bus->sysdata;
+       volatile void __iomem *cfg_data;
+       u8 cfg_type = 0;
+
+       if (ppc_md.pci_exclude_device)
+               if (ppc_md.pci_exclude_device(bus->number, devfn))
+                       return PCIBIOS_DEVICE_NOT_FOUND;
+
+       if (hose->set_cfg_type)
+               if (bus->number != hose->first_busno)
+                       cfg_type = 1;
+
+       PCI_CFG_OUT(hose->cfg_addr,
+                (0x80000000 | ((bus->number - hose->bus_offset) << 16)
+                 | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
+
+       /*
+        * Note: the caller has already checked that offset is
+        * suitably aligned and that len is 1, 2 or 4.
+        */
+       cfg_data = hose->cfg_data + (offset & 3);
+       switch (len) {
+       case 1:
+               *val = in_8(cfg_data);
+               break;
+       case 2:
+               *val = in_le16(cfg_data);
+               break;
+       default:
+               *val = in_le32(cfg_data);
+               break;
+       }
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
+                     int len, u32 val)
+{
+       struct pci_controller *hose = bus->sysdata;
+       volatile void __iomem *cfg_data;
+       u8 cfg_type = 0;
+
+       if (ppc_md.pci_exclude_device)
+               if (ppc_md.pci_exclude_device(bus->number, devfn))
+                       return PCIBIOS_DEVICE_NOT_FOUND;
+
+       if (hose->set_cfg_type)
+               if (bus->number != hose->first_busno)
+                       cfg_type = 1;
+
+       PCI_CFG_OUT(hose->cfg_addr,
+                (0x80000000 | ((bus->number - hose->bus_offset) << 16)
+                 | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
+
+       /*
+        * Note: the caller has already checked that offset is
+        * suitably aligned and that len is 1, 2 or 4.
+        */
+       cfg_data = hose->cfg_data + (offset & 3);
+       switch (len) {
+       case 1:
+               out_8(cfg_data, val);
+               break;
+       case 2:
+               out_le16(cfg_data, val);
+               break;
+       default:
+               out_le32(cfg_data, val);
+               break;
+       }
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops indirect_pci_ops =
+{
+       indirect_read_config,
+       indirect_write_config
+};
+
+void __init
+setup_indirect_pci_nomap(struct pci_controller* hose, void __iomem * cfg_addr,
+       void __iomem * cfg_data)
+{
+       hose->cfg_addr = cfg_addr;
+       hose->cfg_data = cfg_data;
+       hose->ops = &indirect_pci_ops;
+}
+
+void __init
+setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
+{
+       unsigned long base = cfg_addr & PAGE_MASK;
+       void __iomem *mbase, *addr, *data;
+
+       mbase = ioremap(base, PAGE_SIZE);
+       addr = mbase + (cfg_addr & ~PAGE_MASK);
+       if ((cfg_data & PAGE_MASK) != base)
+               mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
+       data = mbase + (cfg_data & ~PAGE_MASK);
+       setup_indirect_pci_nomap(hose, addr, data);
+}
similarity index 96%
rename from arch/ppc64/kernel/mpic.c
rename to arch/powerpc/sysdev/mpic.c
index cc262a0..3948e75 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  arch/ppc64/kernel/mpic.c
+ *  arch/powerpc/kernel/mpic.c
  *
  *  Driver for interrupt controllers following the OpenPIC standard, the
  *  common implementation beeing IBM's MPIC. This driver also can deal
@@ -31,8 +31,8 @@
 #include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/machdep.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
+#include <asm/smp.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(fmt)
@@ -44,6 +44,9 @@ static struct mpic *mpics;
 static struct mpic *mpic_primary;
 static DEFINE_SPINLOCK(mpic_lock);
 
+#ifdef CONFIG_PPC32    /* XXX for now */
+#define distribute_irqs        CONFIG_IRQ_ALL_CPUS
+#endif
 
 /*
  * Register accessor functions
@@ -480,6 +483,7 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
        if (mpic == NULL)
                return NULL;
        
+
        memset(mpic, 0, sizeof(struct mpic));
        mpic->name = name;
 
@@ -700,7 +704,7 @@ void __init mpic_init(struct mpic *mpic)
                /* init hw */
                mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
                mpic_irq_write(i, MPIC_IRQ_DESTINATION,
-                              1 << get_hard_smp_processor_id(boot_cpuid));
+                              1 << hard_smp_processor_id());
 
                /* init linux descriptors */
                if (i < mpic->irq_count) {
@@ -792,6 +796,21 @@ void mpic_setup_this_cpu(void)
 #endif /* CONFIG_SMP */
 }
 
+int mpic_cpu_get_priority(void)
+{
+       struct mpic *mpic = mpic_primary;
+
+       return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
+}
+
+void mpic_cpu_set_priority(int prio)
+{
+       struct mpic *mpic = mpic_primary;
+
+       prio &= MPIC_CPU_TASKPRI_MASK;
+       mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
+}
+
 /*
  * XXX: someone who knows mpic should check this.
  * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
@@ -885,4 +904,25 @@ void mpic_request_ipis(void)
 
        printk("IPIs requested... \n");
 }
+
+void smp_mpic_message_pass(int target, int msg)
+{
+       /* make sure we're sending something that translates to an IPI */
+       if ((unsigned int)msg > 3) {
+               printk("SMP %d: smp_message_pass: unknown msg %d\n",
+                      smp_processor_id(), msg);
+               return;
+       }
+       switch (target) {
+       case MSG_ALL:
+               mpic_send_ipi(msg, 0xffffffff);
+               break;
+       case MSG_ALL_BUT_SELF:
+               mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
+               break;
+       default:
+               mpic_send_ipi(msg, 1 << target);
+               break;
+       }
+}
 #endif /* CONFIG_SMP */
index 776941c..ed9c972 100644 (file)
@@ -747,12 +747,12 @@ config CPM2
          on it (826x, 827x, 8560).
 
 config PPC_CHRP
-       bool
+       bool "  Common Hardware Reference Platform (CHRP) based machines"
        depends on PPC_MULTIPLATFORM
        default y
 
 config PPC_PMAC
-       bool
+       bool "  Apple PowerMac based machines"
        depends on PPC_MULTIPLATFORM
        default y
 
@@ -762,7 +762,7 @@ config PPC_PMAC64
        default y
 
 config PPC_PREP
-       bool
+       bool "  PowerPC Reference Platform (PReP) based machines"
        depends on PPC_MULTIPLATFORM
        default y
 
@@ -1368,7 +1368,7 @@ endmenu
 
 source "lib/Kconfig"
 
-source "arch/ppc/oprofile/Kconfig"
+source "arch/powerpc/oprofile/Kconfig"
 
 source "arch/ppc/Kconfig.debug"
 
index 16e2675..aedc9ae 100644 (file)
@@ -57,9 +57,10 @@ head-$(CONFIG_FSL_BOOKE)     := arch/ppc/kernel/head_fsl_booke.o
 
 head-$(CONFIG_6xx)             += arch/ppc/kernel/idle_6xx.o
 head-$(CONFIG_POWER4)          += arch/ppc/kernel/idle_power4.o
-head-$(CONFIG_PPC_FPU)         += arch/ppc/kernel/fpu.o
+head-$(CONFIG_PPC_FPU)         += arch/powerpc/kernel/fpu.o
 
-core-y                         += arch/ppc/kernel/ arch/ppc/platforms/ \
+core-y                         += arch/ppc/kernel/ arch/powerpc/kernel/ \
+                                  arch/ppc/platforms/ \
                                   arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/
 core-$(CONFIG_4xx)             += arch/ppc/platforms/4xx/
 core-$(CONFIG_83xx)            += arch/ppc/platforms/83xx/
@@ -71,7 +72,7 @@ drivers-$(CONFIG_8xx)         += arch/ppc/8xx_io/
 drivers-$(CONFIG_4xx)          += arch/ppc/4xx_io/
 drivers-$(CONFIG_CPM2)         += arch/ppc/8260_io/
 
-drivers-$(CONFIG_OPROFILE)     += arch/ppc/oprofile/
+drivers-$(CONFIG_OPROFILE)     += arch/powerpc/oprofile/
 
 BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
 
index b1457a8..b35346d 100644 (file)
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
+ifneq ($(CONFIG_PPC_MERGE),y)
 
 extra-$(CONFIG_PPC_STD_MMU)    := head.o
 extra-$(CONFIG_40x)            := head_4xx.o
@@ -9,13 +10,12 @@ extra-$(CONFIG_FSL_BOOKE)    := head_fsl_booke.o
 extra-$(CONFIG_8xx)            := head_8xx.o
 extra-$(CONFIG_6xx)            += idle_6xx.o
 extra-$(CONFIG_POWER4)         += idle_power4.o
-extra-$(CONFIG_PPC_FPU)                += fpu.o
 extra-y                                += vmlinux.lds
 
 obj-y                          := entry.o traps.o irq.o idle.o time.o misc.o \
-                                       process.o signal.o ptrace.o align.o \
-                                       semaphore.o syscalls.o setup.o \
-                                       cputable.o ppc_htab.o perfmon.o
+                                       process.o align.o \
+                                       setup.o \
+                                       ppc_htab.o
 obj-$(CONFIG_6xx)              += l2cr.o cpu_setup_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
 obj-$(CONFIG_POWER4)           += cpu_setup_power4.o
@@ -25,7 +25,6 @@ obj-$(CONFIG_PCI)             += pci.o
 obj-$(CONFIG_KGDB)             += ppc-stub.o
 obj-$(CONFIG_SMP)              += smp.o smp-tbsync.o
 obj-$(CONFIG_TAU)              += temp.o
-obj-$(CONFIG_ALTIVEC)          += vecemu.o vector.o
 ifndef CONFIG_E200
 obj-$(CONFIG_FSL_BOOKE)                += perfmon_fsl_booke.o
 endif
@@ -35,3 +34,21 @@ ifndef CONFIG_MATH_EMULATION
 obj-$(CONFIG_8xx)              += softemu8xx.o
 endif
 
+# These are here while we do the architecture merge
+
+else
+obj-y                          := irq.o idle.o \
+                                       align.o
+obj-$(CONFIG_6xx)              += l2cr.o cpu_setup_6xx.o
+obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
+obj-$(CONFIG_MODULES)          += module.o
+obj-$(CONFIG_NOT_COHERENT_CACHE)       += dma-mapping.o
+obj-$(CONFIG_PCI)              += pci.o
+obj-$(CONFIG_KGDB)             += ppc-stub.o
+obj-$(CONFIG_SMP)              += smp.o smp-tbsync.o
+obj-$(CONFIG_TAU)              += temp.o
+ifndef CONFIG_E200
+obj-$(CONFIG_FSL_BOOKE)                += perfmon_fsl_booke.o
+endif
+obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
+endif
index d9ad1d7..968261d 100644 (file)
@@ -130,10 +130,10 @@ main(void)
        DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
        DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
 
+       DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
        DEFINE(TI_TASK, offsetof(struct thread_info, task));
        DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
-       DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
        DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
        DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
 
@@ -141,6 +141,7 @@ main(void)
        DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
        DEFINE(pbe_next, offsetof(struct pbe, next));
 
+       DEFINE(TASK_SIZE, TASK_SIZE);
        DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
        return 0;
 }
index ba39643..55ed771 100644 (file)
@@ -17,8 +17,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 
-_GLOBAL(__setup_cpu_601)
-       blr
 _GLOBAL(__setup_cpu_603)
        b       setup_common_caches
 _GLOBAL(__setup_cpu_604)
@@ -292,10 +290,10 @@ _GLOBAL(__init_fpu_registers)
 #define CS_SIZE                32
 
        .data
-       .balign L1_CACHE_LINE_SIZE
+       .balign L1_CACHE_BYTES
 cpu_state_storage:
        .space  CS_SIZE
-       .balign L1_CACHE_LINE_SIZE,0
+       .balign L1_CACHE_BYTES,0
        .text
 
 /* Called in normal context to backup CPU 0 state. This
index 7e4fbb6..d7bfd60 100644 (file)
@@ -63,8 +63,6 @@ _GLOBAL(__970_cpu_preinit)
        isync
        blr
 
-_GLOBAL(__setup_cpu_power4)
-       blr
 _GLOBAL(__setup_cpu_ppc970)
        mfspr   r0,SPRN_HID0
        li      r11,5                   /* clear DOZE and SLEEP */
@@ -88,10 +86,10 @@ _GLOBAL(__setup_cpu_ppc970)
 #define CS_SIZE                32
 
        .data
-       .balign L1_CACHE_LINE_SIZE
+       .balign L1_CACHE_BYTES
 cpu_state_storage:     
        .space  CS_SIZE
-       .balign L1_CACHE_LINE_SIZE,0
+       .balign L1_CACHE_BYTES,0
        .text
        
 /* Called in normal context to backup CPU 0 state. This
index 03d4886..f044edb 100644 (file)
@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall)
        bl      do_show_syscall
 #endif /* SHOW_SYSCALLS */
        rlwinm  r10,r1,0,0,18   /* current_thread_info() */
-       lwz     r11,TI_LOCAL_FLAGS(r10)
-       rlwinm  r11,r11,0,~_TIFL_FORCE_NOERROR
-       stw     r11,TI_LOCAL_FLAGS(r10)
+       li      r11,0
+       stb     r11,TI_SC_NOERR(r10)
        lwz     r11,TI_FLAGS(r10)
        andi.   r11,r11,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
@@ -227,8 +226,8 @@ ret_from_syscall:
        cmplw   0,r3,r11
        rlwinm  r12,r1,0,0,18   /* current_thread_info() */
        blt+    30f
-       lwz     r11,TI_LOCAL_FLAGS(r12)
-       andi.   r11,r11,_TIFL_FORCE_NOERROR
+       lbz     r11,TI_SC_NOERR(r12)
+       cmpwi   r11,0
        bne     30f
        neg     r3,r3
        lwz     r10,_CCR(r1)    /* Set SO bit in CR */
@@ -633,7 +632,8 @@ sigreturn_exit:
        rlwinm  r12,r1,0,0,18   /* current_thread_info() */
        lwz     r9,TI_FLAGS(r12)
        andi.   r0,r9,_TIF_SYSCALL_T_OR_A
-       bnel-   do_syscall_trace_leave
+       beq+    ret_from_except_full
+       bl      do_syscall_trace_leave
        /* fall through */
 
        .globl  ret_from_except_full
index 1960fb8..c5a890d 100644 (file)
@@ -349,12 +349,12 @@ i##n:                                                             \
 
 /* System reset */
 /* core99 pmac starts the seconary here by changing the vector, and
-   putting it back to what it was (UnknownException) when done.  */
+   putting it back to what it was (unknown_exception) when done.  */
 #if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
        . = 0x100
        b       __secondary_start_gemini
 #else
-       EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+       EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
 #endif
 
 /* Machine check */
@@ -389,7 +389,7 @@ i##n:                                                               \
        cmpwi   cr1,r4,0
        bne     cr1,1f
 #endif
-       EXC_XFER_STD(0x200, MachineCheckException)
+       EXC_XFER_STD(0x200, machine_check_exception)
 #ifdef CONFIG_PPC_CHRP
 1:     b       machine_check_in_rtas
 #endif
@@ -456,10 +456,10 @@ Alignment:
        mfspr   r5,SPRN_DSISR
        stw     r5,_DSISR(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_EE(0x600, AlignmentException)
+       EXC_XFER_EE(0x600, alignment_exception)
 
 /* Program check exception */
-       EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+       EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
 
 /* Floating-point unavailable */
        . = 0x800
@@ -467,13 +467,13 @@ FPUnavailable:
        EXCEPTION_PROLOG
        bne     load_up_fpu             /* if from user, just load it up */
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_EE_LITE(0x800, KernelFP)
+       EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
 
 /* Decrementer */
        EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
 
-       EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
 
 /* System call */
        . = 0xc00
@@ -482,8 +482,8 @@ SystemCall:
        EXC_XFER_EE_LITE(0xc00, DoSyscall)
 
 /* Single step - not used on 601 */
-       EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
-       EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+       EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
 
 /*
  * The Altivec unavailable trap is at 0x0f20.  Foo.
@@ -502,7 +502,7 @@ SystemCall:
 Trap_0f:
        EXCEPTION_PROLOG
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_EE(0xf00, UnknownException)
+       EXC_XFER_EE(0xf00, unknown_exception)
 
 /*
  * Handle TLB miss for instruction on 603/603e.
@@ -702,44 +702,44 @@ DataStoreTLBMiss:
        rfi
 
 #ifndef CONFIG_ALTIVEC
-#define AltivecAssistException UnknownException
+#define altivec_assist_exception       unknown_exception
 #endif
 
-       EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
+       EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
        EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
-       EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
 #ifdef CONFIG_POWER4
-       EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
        EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
 #else /* !CONFIG_POWER4 */
-       EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
        EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
-       EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_POWER4 */
-       EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
-       EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE)
+       EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
 
        .globl mol_trampoline
        .set mol_trampoline, i0x2f00
@@ -751,7 +751,7 @@ AltiVecUnavailable:
 #ifdef CONFIG_ALTIVEC
        bne     load_up_altivec         /* if from user, just load it up */
 #endif /* CONFIG_ALTIVEC */
-       EXC_XFER_EE_LITE(0xf20, AltivecUnavailException)
+       EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
 
 #ifdef CONFIG_PPC64BRIDGE
 DataAccess:
@@ -767,12 +767,12 @@ DataSegment:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        mfspr   r4,SPRN_DAR
        stw     r4,_DAR(r11)
-       EXC_XFER_STD(0x380, UnknownException)
+       EXC_XFER_STD(0x380, unknown_exception)
 
 InstructionSegment:
        EXCEPTION_PROLOG
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_STD(0x480, UnknownException)
+       EXC_XFER_STD(0x480, unknown_exception)
 #endif /* CONFIG_PPC64BRIDGE */
 
 #ifdef CONFIG_ALTIVEC
@@ -804,7 +804,7 @@ load_up_altivec:
        beq     1f
        add     r4,r4,r6
        addi    r4,r4,THREAD    /* want THREAD of last_task_used_altivec */
-       SAVE_32VR(0,r10,r4)
+       SAVE_32VRS(0,r10,r4)
        mfvscr  vr0
        li      r10,THREAD_VSCR
        stvx    vr0,r10,r4
@@ -824,7 +824,7 @@ load_up_altivec:
        stw     r4,THREAD_USED_VR(r5)
        lvx     vr0,r10,r5
        mtvscr  vr0
-       REST_32VR(0,r10,r5)
+       REST_32VRS(0,r10,r5)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        sub     r4,r4,r6
@@ -870,7 +870,7 @@ giveup_altivec:
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpwi   0,r5,0
-       SAVE_32VR(0, r4, r3)
+       SAVE_32VRS(0, r4, r3)
        mfvscr  vr0
        li      r4,THREAD_VSCR
        stvx    vr0,r4,r3
@@ -916,7 +916,7 @@ relocate_kernel:
 copy_and_flush:
        addi    r5,r5,-4
        addi    r6,r6,-4
-4:     li      r0,L1_CACHE_LINE_SIZE/4
+4:     li      r0,L1_CACHE_BYTES/4
        mtctr   r0
 3:     addi    r6,r6,4                 /* copy a cache line */
        lwzx    r0,r6,r4
@@ -1059,7 +1059,6 @@ __secondary_start:
 
        lis     r3,-KERNELBASE@h
        mr      r4,r24
-       bl      identify_cpu
        bl      call_setup_cpu          /* Call setup_cpu for this CPU */
 #ifdef CONFIG_6xx
        lis     r3,-KERNELBASE@h
@@ -1109,11 +1108,6 @@ __secondary_start:
  * Those generic dummy functions are kept for CPUs not
  * included in CONFIG_6xx
  */
-_GLOBAL(__setup_cpu_power3)
-       blr
-_GLOBAL(__setup_cpu_generic)
-       blr
-
 #if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
 _GLOBAL(__save_cpu_setup)
        blr
index 599245b..8b49679 100644 (file)
@@ -309,13 +309,13 @@ skpinv:   addi    r4,r4,1                         /* Increment */
 
 interrupt_base:
        /* Critical Input Interrupt */
-       CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
+       CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
 
        /* Machine Check Interrupt */
 #ifdef CONFIG_440A
-       MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+       MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 #else
-       CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 #endif
 
        /* Data Storage Interrupt */
@@ -442,7 +442,7 @@ interrupt_base:
 #ifdef CONFIG_PPC_FPU
        FP_UNAVAILABLE_EXCEPTION
 #else
-       EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
 #endif
 
        /* System Call Interrupt */
@@ -451,21 +451,21 @@ interrupt_base:
        EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
        /* Auxillary Processor Unavailable Interrupt */
-       EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION
 
        /* Fixed Internal Timer Interrupt */
        /* TODO: Add FIT support */
-       EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
 
        /* Watchdog Timer Interrupt */
        /* TODO: Add watchdog support */
 #ifdef CONFIG_BOOKE_WDT
        CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
 #else
-       CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException)
+       CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
 #endif
 
        /* Data TLB Error Interrupt */
@@ -743,14 +743,18 @@ _GLOBAL(set_context)
  * goes at the beginning of the data segment, which is page-aligned.
  */
        .data
-_GLOBAL(sdata)
-_GLOBAL(empty_zero_page)
+       .align  12
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
        .space  4096
 
 /*
  * To support >32-bit physical addresses, we use an 8KB pgdir.
  */
-_GLOBAL(swapper_pg_dir)
+       .globl  swapper_pg_dir
+swapper_pg_dir:
        .space  8192
 
 /* Reserved 4k for the critical exception stack & 4k for the machine
@@ -759,13 +763,15 @@ _GLOBAL(swapper_pg_dir)
         .align 12
 exception_stack_bottom:
        .space  BOOKE_EXCEPTION_STACK_SIZE
-_GLOBAL(exception_stack_top)
+       .globl  exception_stack_top
+exception_stack_top:
 
 /*
  * This space gets a copy of optional info passed to us by the bootstrap
  * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
  */
-_GLOBAL(cmd_line)
+       .globl  cmd_line
+cmd_line:
        .space  512
 
 /*
@@ -774,5 +780,3 @@ _GLOBAL(cmd_line)
  */
 abatron_pteptrs:
        .space  8
-
-
index 8562b80..10c261c 100644 (file)
@@ -245,12 +245,12 @@ label:
 /*
  * 0x0100 - Critical Interrupt Exception
  */
-       CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException)
+       CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
 
 /*
  * 0x0200 - Machine Check Exception
  */
-       CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 
 /*
  * 0x0300 - Data Storage Exception
@@ -405,7 +405,7 @@ label:
        mfspr   r4,SPRN_DEAR            /* Grab the DEAR and save it */
        stw     r4,_DEAR(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_EE(0x600, AlignmentException)
+       EXC_XFER_EE(0x600, alignment_exception)
 
 /* 0x0700 - Program Exception */
        START_EXCEPTION(0x0700, ProgramCheck)
@@ -413,21 +413,21 @@ label:
        mfspr   r4,SPRN_ESR             /* Grab the ESR and save it */
        stw     r4,_ESR(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_STD(0x700, ProgramCheckException)
+       EXC_XFER_STD(0x700, program_check_exception)
 
-       EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
 
 /* 0x0C00 - System Call Exception */
        START_EXCEPTION(0x0C00, SystemCall)
        NORMAL_EXCEPTION_PROLOG
        EXC_XFER_EE_LITE(0xc00, DoSyscall)
 
-       EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
 
 /* 0x1000 - Programmable Interval Timer (PIT) Exception */
        START_EXCEPTION(0x1000, Decrementer)
@@ -444,14 +444,14 @@ label:
 
 /* 0x1010 - Fixed Interval Timer (FIT) Exception
 */
-       STND_EXCEPTION(0x1010,  FITException,           UnknownException)
+       STND_EXCEPTION(0x1010,  FITException,           unknown_exception)
 
 /* 0x1020 - Watchdog Timer (WDT) Exception
 */
 #ifdef CONFIG_BOOKE_WDT
        CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
 #else
-       CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException)
+       CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
 #endif
 #endif
 
@@ -656,25 +656,25 @@ label:
        mfspr   r10, SPRN_SPRG0
        b       InstructionAccess
 
-       EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
 #ifdef CONFIG_IBM405_ERR51
        /* 405GP errata 51 */
        START_EXCEPTION(0x1700, Trap_17)
        b DTLBMiss
 #else
-       EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
 #endif
-       EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
 
 /* Check for a single step debug exception while in an exception
  * handler before state has been saved.  This is to catch the case
@@ -988,10 +988,14 @@ _GLOBAL(set_context)
  * goes at the beginning of the data segment, which is page-aligned.
  */
        .data
-_GLOBAL(sdata)
-_GLOBAL(empty_zero_page)
+       .align  12
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
        .space  4096
-_GLOBAL(swapper_pg_dir)
+       .globl  swapper_pg_dir
+swapper_pg_dir:
        .space  4096
 
 
@@ -1001,12 +1005,14 @@ _GLOBAL(swapper_pg_dir)
 exception_stack_bottom:
        .space  4096
 critical_stack_top:
-_GLOBAL(exception_stack_top)
+       .globl  exception_stack_top
+exception_stack_top:
 
 /* This space gets a copy of optional info passed to us by the bootstrap
  * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
  */
-_GLOBAL(cmd_line)
+       .globl  cmd_line
+cmd_line:
        .space  512
 
 /* Room for two PTE pointers, usually the kernel and current user pointers
index cb1a3a5..de09787 100644 (file)
@@ -203,7 +203,7 @@ i##n:                                                               \
                          ret_from_except)
 
 /* System reset */
-       EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+       EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
 
 /* Machine check */
        . = 0x200
@@ -214,7 +214,7 @@ MachineCheck:
        mfspr r5,SPRN_DSISR
        stw r5,_DSISR(r11)
        addi r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_STD(0x200, MachineCheckException)
+       EXC_XFER_STD(0x200, machine_check_exception)
 
 /* Data access exception.
  * This is "never generated" by the MPC8xx.  We jump to it for other
@@ -252,20 +252,20 @@ Alignment:
        mfspr   r5,SPRN_DSISR
        stw     r5,_DSISR(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_EE(0x600, AlignmentException)
+       EXC_XFER_EE(0x600, alignment_exception)
 
 /* Program check exception */
-       EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+       EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
 
 /* No FPU on MPC8xx.  This exception is not supposed to happen.
 */
-       EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD)
+       EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
 
 /* Decrementer */
        EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
 
-       EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
 
 /* System call */
        . = 0xc00
@@ -274,9 +274,9 @@ SystemCall:
        EXC_XFER_EE_LITE(0xc00, DoSyscall)
 
 /* Single step - not used on 601 */
-       EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
-       EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+       EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
 
 /* On the MPC8xx, this is a software emulation interrupt.  It occurs
  * for all unimplemented and illegal instructions.
@@ -540,22 +540,22 @@ DataTLBError:
 #endif
        b       DataAccess
 
-       EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
 
 /* On the MPC8xx, these next four traps are used for development
  * support of breakpoints and such.  Someday I will get around to
  * using them.
  */
-       EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
 
        . = 0x2000
 
index 9342acf..aeb349b 100644 (file)
@@ -335,7 +335,7 @@ label:
        mfspr   r4,SPRN_DEAR;           /* Grab the DEAR and save it */       \
        stw     r4,_DEAR(r11);                                                \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
-       EXC_XFER_EE(0x0600, AlignmentException)
+       EXC_XFER_EE(0x0600, alignment_exception)
 
 #define PROGRAM_EXCEPTION                                                    \
        START_EXCEPTION(Program)                                              \
@@ -343,7 +343,7 @@ label:
        mfspr   r4,SPRN_ESR;            /* Grab the ESR and save it */        \
        stw     r4,_ESR(r11);                                                 \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
-       EXC_XFER_STD(0x0700, ProgramCheckException)
+       EXC_XFER_STD(0x0700, program_check_exception)
 
 #define DECREMENTER_EXCEPTION                                                \
        START_EXCEPTION(Decrementer)                                          \
index 8e52e84..5063c60 100644 (file)
@@ -426,14 +426,14 @@ skpinv:   addi    r6,r6,1                         /* Increment */
 
 interrupt_base:
        /* Critical Input Interrupt */
-       CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
+       CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
 
        /* Machine Check Interrupt */
 #ifdef CONFIG_E200
        /* no RFMCI, MCSRRs on E200 */
-       CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 #else
-       MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+       MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 #endif
 
        /* Data Storage Interrupt */
@@ -542,9 +542,9 @@ interrupt_base:
 #else
 #ifdef CONFIG_E200
        /* E200 treats 'normal' floating point instructions as FP Unavail exception */
-       EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE)
+       EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
 #else
-       EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
 #endif
 #endif
 
@@ -554,20 +554,20 @@ interrupt_base:
        EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
        /* Auxillary Processor Unavailable Interrupt */
-       EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION
 
        /* Fixed Internal Timer Interrupt */
        /* TODO: Add FIT support */
-       EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
 
        /* Watchdog Timer Interrupt */
 #ifdef CONFIG_BOOKE_WDT
        CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
 #else
-       CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException)
+       CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
 #endif
 
        /* Data TLB Error Interrupt */
@@ -696,21 +696,21 @@ interrupt_base:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_EE_LITE(0x2010, KernelSPE)
 #else
-       EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_SPE */
 
        /* SPE Floating Point Data */
 #ifdef CONFIG_SPE
        EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
 #else
-       EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_SPE */
 
        /* SPE Floating Point Round */
-       EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE)
+       EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
 
        /* Performance Monitor */
-       EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD)
+       EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
 
 
        /* Debug Interrupt */
@@ -853,7 +853,7 @@ load_up_spe:
        cmpi    0,r4,0
        beq     1f
        addi    r4,r4,THREAD    /* want THREAD of last_task_used_spe */
-       SAVE_32EVR(0,r10,r4)
+       SAVE_32EVRS(0,r10,r4)
        evxor   evr10, evr10, evr10     /* clear out evr10 */
        evmwumiaa evr10, evr10, evr10   /* evr10 <- ACC = 0 * 0 + ACC */
        li      r5,THREAD_ACC
@@ -873,7 +873,7 @@ load_up_spe:
        stw     r4,THREAD_USED_SPE(r5)
        evlddx  evr4,r10,r5
        evmra   evr4,evr4
-       REST_32EVR(0,r10,r5)
+       REST_32EVRS(0,r10,r5)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        stw     r4,last_task_used_spe@l(r3)
@@ -963,7 +963,7 @@ _GLOBAL(giveup_spe)
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
-       SAVE_32EVR(0, r4, r3)
+       SAVE_32EVRS(0, r4, r3)
        evxor   evr6, evr6, evr6        /* clear out evr6 */
        evmwumiaa evr6, evr6, evr6      /* evr6 <- ACC = 0 * 0 + ACC */
        li      r4,THREAD_ACC
@@ -1028,10 +1028,14 @@ _GLOBAL(set_context)
  * goes at the beginning of the data segment, which is page-aligned.
  */
        .data
-_GLOBAL(sdata)
-_GLOBAL(empty_zero_page)
+       .align  12
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
        .space  4096
-_GLOBAL(swapper_pg_dir)
+       .globl  swapper_pg_dir
+swapper_pg_dir:
        .space  4096
 
 /* Reserved 4k for the critical exception stack & 4k for the machine
@@ -1040,13 +1044,15 @@ _GLOBAL(swapper_pg_dir)
         .align 12
 exception_stack_bottom:
        .space  BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
-_GLOBAL(exception_stack_top)
+       .globl  exception_stack_top
+exception_stack_top:
 
 /*
  * This space gets a copy of optional info passed to us by the bootstrap
  * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
  */
-_GLOBAL(cmd_line)
+       .globl  cmd_line
+cmd_line:
        .space  512
 
 /*
@@ -1055,4 +1061,3 @@ _GLOBAL(cmd_line)
  */
 abatron_pteptrs:
        .space  8
-
index fba29c8..11e5b44 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/cache.h>
 #include <asm/cputable.h>
 #include <asm/machdep.h>
+#include <asm/smp.h>
 
 void default_idle(void)
 {
@@ -74,7 +75,7 @@ void cpu_idle(void)
 /*
  * Register the sysctl to set/clear powersave_nap.
  */
-extern unsigned long powersave_nap;
+extern int powersave_nap;
 
 static ctl_table powersave_nap_ctl_table[]={
        {
index 8843f3a..772e428 100644 (file)
@@ -57,6 +57,7 @@
 #include <asm/cache.h>
 #include <asm/prom.h>
 #include <asm/ptrace.h>
+#include <asm/machdep.h>
 
 #define NR_MASK_WORDS  ((NR_IRQS + 31) / 32)
 
index 8611152..d7f4e98 100644 (file)
@@ -203,7 +203,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
         * L1 icache
         */
        b       20f
-       .balign L1_CACHE_LINE_SIZE
+       .balign L1_CACHE_BYTES
 22:
        sync
        mtspr   SPRN_L2CR,r3
index 90d917d..2350f3e 100644 (file)
@@ -125,9 +125,8 @@ _GLOBAL(identify_cpu)
 1:
        addis   r6,r3,cur_cpu_spec@ha
        addi    r6,r6,cur_cpu_spec@l
-       slwi    r4,r4,2
        sub     r8,r8,r3
-       stwx    r8,r4,r6
+       stw     r8,0(r6)
        blr
 
 /*
@@ -186,19 +185,18 @@ _GLOBAL(do_cpu_ftr_fixups)
  *
  * Setup function is called with:
  *   r3 = data offset
- *   r4 = CPU number
- *   r5 = ptr to CPU spec (relocated)
+ *   r4 = ptr to CPU spec (relocated)
  */
 _GLOBAL(call_setup_cpu)
-       addis   r5,r3,cur_cpu_spec@ha
-       addi    r5,r5,cur_cpu_spec@l
-       slwi    r4,r24,2
-       lwzx    r5,r4,r5
+       addis   r4,r3,cur_cpu_spec@ha
+       addi    r4,r4,cur_cpu_spec@l
+       lwz     r4,0(r4)
+       add     r4,r4,r3
+       lwz     r5,CPU_SPEC_SETUP(r4)
+       cmpi    0,r5,0
        add     r5,r5,r3
-       lwz     r6,CPU_SPEC_SETUP(r5)
-       add     r6,r6,r3
-       mtctr   r6
-       mr      r4,r24
+       beqlr
+       mtctr   r5
        bctr
 
 #if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
@@ -273,134 +271,6 @@ _GLOBAL(low_choose_7447a_dfs)
 
 #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
 
-/* void local_save_flags_ptr(unsigned long *flags) */
-_GLOBAL(local_save_flags_ptr)
-       mfmsr   r4
-       stw     r4,0(r3)
-       blr
-       /*
-        * Need these nops here for taking over save/restore to
-        * handle lost intrs
-        * -- Cort
-        */
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-_GLOBAL(local_save_flags_ptr_end)
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
-/*
- * Just set/clear the MSR_EE bit through restore/flags but do not
- * change anything else.  This is needed by the RT system and makes
- * sense anyway.
- *    -- Cort
- */
-       mfmsr   r4
-       /* Copy all except the MSR_EE bit from r4 (current MSR value)
-          to r3.  This is the sort of thing the rlwimi instruction is
-          designed for.  -- paulus. */
-       rlwimi  r3,r4,0,17,15
-        /* Check if things are setup the way we want _already_. */
-       cmpw    0,r3,r4
-       beqlr
-1:     SYNC
-       mtmsr   r3
-       SYNC
-       blr
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-_GLOBAL(local_irq_restore_end)
-
-_GLOBAL(local_irq_disable)
-       mfmsr   r0              /* Get current interrupt state */
-       rlwinm  r3,r0,16+1,32-1,31      /* Extract old value of 'EE' */
-       rlwinm  r0,r0,0,17,15   /* clear MSR_EE in r0 */
-       SYNC                    /* Some chip revs have problems here... */
-       mtmsr   r0              /* Update machine state */
-       blr                     /* Done */
-       /*
-        * Need these nops here for taking over save/restore to
-        * handle lost intrs
-        * -- Cort
-        */
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-_GLOBAL(local_irq_disable_end)
-
-_GLOBAL(local_irq_enable)
-       mfmsr   r3              /* Get current state */
-       ori     r3,r3,MSR_EE    /* Turn on 'EE' bit */
-       SYNC                    /* Some chip revs have problems here... */
-       mtmsr   r3              /* Update machine state */
-       blr
-       /*
-        * Need these nops here for taking over save/restore to
-        * handle lost intrs
-        * -- Cort
-        */
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-       nop
-_GLOBAL(local_irq_enable_end)
-
 /*
  * complement mask on the msr then "or" some values on.
  *     _nmask_and_or_msr(nmask, value_to_or)
@@ -628,21 +498,21 @@ _GLOBAL(flush_icache_range)
 BEGIN_FTR_SECTION
        blr                             /* for 601, do nothing */
 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
-       li      r5,L1_CACHE_LINE_SIZE-1
+       li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5
        subf    r4,r3,r4
        add     r4,r4,r5
-       srwi.   r4,r4,LG_L1_CACHE_LINE_SIZE
+       srwi.   r4,r4,L1_CACHE_SHIFT
        beqlr
        mtctr   r4
        mr      r6,r3
 1:     dcbst   0,r3
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbst's to get to ram */
        mtctr   r4
 2:     icbi    0,r6
-       addi    r6,r6,L1_CACHE_LINE_SIZE
+       addi    r6,r6,L1_CACHE_BYTES
        bdnz    2b
        sync                            /* additional sync needed on g4 */
        isync
@@ -655,16 +525,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  * clean_dcache_range(unsigned long start, unsigned long stop)
  */
 _GLOBAL(clean_dcache_range)
-       li      r5,L1_CACHE_LINE_SIZE-1
+       li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5
        subf    r4,r3,r4
        add     r4,r4,r5
-       srwi.   r4,r4,LG_L1_CACHE_LINE_SIZE
+       srwi.   r4,r4,L1_CACHE_SHIFT
        beqlr
        mtctr   r4
 
 1:     dcbst   0,r3
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbst's to get to ram */
        blr
@@ -676,16 +546,16 @@ _GLOBAL(clean_dcache_range)
  * flush_dcache_range(unsigned long start, unsigned long stop)
  */
 _GLOBAL(flush_dcache_range)
-       li      r5,L1_CACHE_LINE_SIZE-1
+       li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5
        subf    r4,r3,r4
        add     r4,r4,r5
-       srwi.   r4,r4,LG_L1_CACHE_LINE_SIZE
+       srwi.   r4,r4,L1_CACHE_SHIFT
        beqlr
        mtctr   r4
 
 1:     dcbf    0,r3
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbst's to get to ram */
        blr
@@ -698,16 +568,16 @@ _GLOBAL(flush_dcache_range)
  * invalidate_dcache_range(unsigned long start, unsigned long stop)
  */
 _GLOBAL(invalidate_dcache_range)
-       li      r5,L1_CACHE_LINE_SIZE-1
+       li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5
        subf    r4,r3,r4
        add     r4,r4,r5
-       srwi.   r4,r4,LG_L1_CACHE_LINE_SIZE
+       srwi.   r4,r4,L1_CACHE_SHIFT
        beqlr
        mtctr   r4
 
 1:     dcbi    0,r3
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbi's to get to ram */
        blr
@@ -728,7 +598,7 @@ _GLOBAL(flush_dcache_all)
        mtctr   r4
        lis     r5, KERNELBASE@h
 1:     lwz     r3, 0(r5)               /* Load one word from every line */
-       addi    r5, r5, L1_CACHE_LINE_SIZE
+       addi    r5, r5, L1_CACHE_BYTES
        bdnz    1b
        blr
 #endif /* CONFIG_NOT_COHERENT_CACHE */
@@ -746,16 +616,16 @@ BEGIN_FTR_SECTION
        blr                                     /* for 601, do nothing */
 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
        rlwinm  r3,r3,0,0,19                    /* Get page base address */
-       li      r4,4096/L1_CACHE_LINE_SIZE      /* Number of lines in a page */
+       li      r4,4096/L1_CACHE_BYTES  /* Number of lines in a page */
        mtctr   r4
        mr      r6,r3
 0:     dcbst   0,r3                            /* Write line to ram */
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    0b
        sync
        mtctr   r4
 1:     icbi    0,r6
-       addi    r6,r6,L1_CACHE_LINE_SIZE
+       addi    r6,r6,L1_CACHE_BYTES
        bdnz    1b
        sync
        isync
@@ -778,16 +648,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
        mtmsr   r0
        isync
        rlwinm  r3,r3,0,0,19                    /* Get page base address */
-       li      r4,4096/L1_CACHE_LINE_SIZE      /* Number of lines in a page */
+       li      r4,4096/L1_CACHE_BYTES  /* Number of lines in a page */
        mtctr   r4
        mr      r6,r3
 0:     dcbst   0,r3                            /* Write line to ram */
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    0b
        sync
        mtctr   r4
 1:     icbi    0,r6
-       addi    r6,r6,L1_CACHE_LINE_SIZE
+       addi    r6,r6,L1_CACHE_BYTES
        bdnz    1b
        sync
        mtmsr   r10                             /* restore DR */
@@ -802,7 +672,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  * void clear_pages(void *page, int order) ;
  */
 _GLOBAL(clear_pages)
-       li      r0,4096/L1_CACHE_LINE_SIZE
+       li      r0,4096/L1_CACHE_BYTES
        slw     r0,r0,r4
        mtctr   r0
 #ifdef CONFIG_8xx
@@ -814,7 +684,7 @@ _GLOBAL(clear_pages)
 #else
 1:     dcbz    0,r3
 #endif
-       addi    r3,r3,L1_CACHE_LINE_SIZE
+       addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        blr
 
@@ -840,7 +710,7 @@ _GLOBAL(copy_page)
 
 #ifdef CONFIG_8xx
        /* don't use prefetch on 8xx */
-       li      r0,4096/L1_CACHE_LINE_SIZE
+       li      r0,4096/L1_CACHE_BYTES
        mtctr   r0
 1:     COPY_16_BYTES
        bdnz    1b
@@ -854,13 +724,13 @@ _GLOBAL(copy_page)
        li      r11,4
        mtctr   r0
 11:    dcbt    r11,r4
-       addi    r11,r11,L1_CACHE_LINE_SIZE
+       addi    r11,r11,L1_CACHE_BYTES
        bdnz    11b
 #else /* MAX_COPY_PREFETCH == 1 */
        dcbt    r5,r4
-       li      r11,L1_CACHE_LINE_SIZE+4
+       li      r11,L1_CACHE_BYTES+4
 #endif /* MAX_COPY_PREFETCH */
-       li      r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
+       li      r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
        crclr   4*cr0+eq
 2:
        mtctr   r0
@@ -868,12 +738,12 @@ _GLOBAL(copy_page)
        dcbt    r11,r4
        dcbz    r5,r3
        COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 32
+#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 64
+#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES
        COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 128
+#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES
        COPY_16_BYTES
        COPY_16_BYTES
index 854e45b..ad4ef2a 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
+#include <asm/machdep.h>
 
 #undef DEBUG
 
@@ -53,7 +54,7 @@ static u8* pci_to_OF_bus_map;
 /* By default, we don't re-assign bus numbers. We do this only on
  * some pmacs
  */
-int pci_assign_all_busses;
+int pci_assign_all_buses;
 
 struct pci_controller* hose_head;
 struct pci_controller** hose_tail = &hose_head;
@@ -644,7 +645,7 @@ pcibios_alloc_controller(void)
 /*
  * Functions below are used on OpenFirmware machines.
  */
-static void __openfirmware
+static void
 make_one_node_map(struct device_node* node, u8 pci_bus)
 {
        int *bus_range;
@@ -678,7 +679,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
        }
 }
        
-void __openfirmware
+void
 pcibios_make_OF_bus_map(void)
 {
        int i;
@@ -720,7 +721,7 @@ pcibios_make_OF_bus_map(void)
 
 typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
 
-static struct device_node* __openfirmware
+static struct device_node*
 scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
 {
        struct device_node* sub_node;
@@ -761,7 +762,7 @@ scan_OF_pci_childs_iterator(struct device_node* node, void* data)
        return 0;
 }
 
-static struct device_node* __openfirmware
+static struct device_node*
 scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
 {
        u8 filter_data[2] = {bus, dev_fn};
@@ -813,18 +814,20 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
        /* Now, lookup childs of the hose */
        return scan_OF_childs_for_device(node->child, busnr, devfn);
 }
+EXPORT_SYMBOL(pci_busdev_to_OF_node);
 
 struct device_node*
 pci_device_to_OF_node(struct pci_dev *dev)
 {
        return pci_busdev_to_OF_node(dev->bus, dev->devfn);
 }
+EXPORT_SYMBOL(pci_device_to_OF_node);
 
 /* This routine is meant to be used early during boot, when the
  * PCI bus numbers have not yet been assigned, and you need to
  * issue PCI config cycles to an OF device.
  * It could also be used to "fix" RTAS config cycles if you want
- * to set pci_assign_all_busses to 1 and still use RTAS for PCI
+ * to set pci_assign_all_buses to 1 and still use RTAS for PCI
  * config cycles.
  */
 struct pci_controller*
@@ -842,7 +845,7 @@ pci_find_hose_for_OF_device(struct device_node* node)
        return NULL;
 }
 
-static int __openfirmware
+static int
 find_OF_pci_device_filter(struct device_node* node, void* data)
 {
        return ((void *)node == data);
@@ -890,6 +893,7 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
        }
        return -ENODEV;
 }
+EXPORT_SYMBOL(pci_device_from_OF_node);
 
 void __init
 pci_process_bridge_OF_ranges(struct pci_controller *hose,
@@ -1030,6 +1034,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
 }
 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
 
+#else /* CONFIG_PPC_OF */
+void pcibios_make_OF_bus_map(void)
+{
+}
 #endif /* CONFIG_PPC_OF */
 
 /* Add sysfs properties */
@@ -1262,12 +1270,12 @@ pcibios_init(void)
 
        /* Scan all of the recorded PCI controllers.  */
        for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
-               if (pci_assign_all_busses)
+               if (pci_assign_all_buses)
                        hose->first_busno = next_busno;
                hose->last_busno = 0xff;
                bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
                hose->last_busno = bus->subordinate;
-               if (pci_assign_all_busses || next_busno <= hose->last_busno)
+               if (pci_assign_all_buses || next_busno <= hose->last_busno)
                        next_busno = hose->last_busno + pcibios_assign_bus_offset;
        }
        pci_bus_count = next_busno;
@@ -1276,7 +1284,7 @@ pcibios_init(void)
         * numbers vs. kernel bus numbers since we may have to
         * remap them.
         */
-       if (pci_assign_all_busses && have_of)
+       if (pci_assign_all_buses && have_of)
                pcibios_make_OF_bus_map();
 
        /* Do machine dependent PCI interrupt routing */
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
deleted file mode 100644 (file)
index 22df9a5..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/* kernel/perfmon.c
- * PPC 32 Performance Monitor Infrastructure
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-
-/* A lock to regulate grabbing the interrupt */
-DEFINE_SPINLOCK(perfmon_lock);
-
-#if defined (CONFIG_FSL_BOOKE) && !defined (CONFIG_E200)
-static void dummy_perf(struct pt_regs *regs)
-{
-       unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
-
-       pmgc0 &= ~PMGC0_PMIE;
-       mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-#elif defined(CONFIG_6xx)
-/* Ensure exceptions are disabled */
-static void dummy_perf(struct pt_regs *regs)
-{
-       unsigned int mmcr0 = mfspr(SPRN_MMCR0);
-
-       mmcr0 &= ~MMCR0_PMXE;
-       mtspr(SPRN_MMCR0, mmcr0);
-}
-#else
-static void dummy_perf(struct pt_regs *regs)
-{
-}
-#endif
-
-void (*perf_irq)(struct pt_regs *) = dummy_perf;
-
-/* Grab the interrupt, if it's free.
- * Returns 0 on success, -1 if the interrupt is taken already */
-int request_perfmon_irq(void (*handler)(struct pt_regs *))
-{
-       int err = 0;
-
-       spin_lock(&perfmon_lock);
-
-       if (perf_irq == dummy_perf)
-               perf_irq = handler;
-       else {
-               pr_info("perfmon irq already handled by %p\n", perf_irq);
-               err = -1;
-       }
-
-       spin_unlock(&perfmon_lock);
-
-       return err;
-}
-
-void free_perfmon_irq(void)
-{
-       spin_lock(&perfmon_lock);
-
-       perf_irq = dummy_perf;
-
-       spin_unlock(&perfmon_lock);
-}
-
-EXPORT_SYMBOL(perf_irq);
-EXPORT_SYMBOL(request_perfmon_irq);
-EXPORT_SYMBOL(free_perfmon_irq);
index 03526bf..32455df 100644 (file)
@@ -32,7 +32,7 @@
 #include <asm/io.h>
 #include <asm/reg.h>
 #include <asm/xmon.h>
-#include <asm/perfmon.h>
+#include <asm/pmc.h>
 
 static inline u32 get_pmlca(int ctr);
 static inline void set_pmlca(int ctr, u32 pmlca);
index 88f6bb7..dcc8344 100644 (file)
 
 extern void transfer_to_handler(void);
 extern void do_IRQ(struct pt_regs *regs);
-extern void MachineCheckException(struct pt_regs *regs);
-extern void AlignmentException(struct pt_regs *regs);
-extern void ProgramCheckException(struct pt_regs *regs);
-extern void SingleStepException(struct pt_regs *regs);
+extern void machine_check_exception(struct pt_regs *regs);
+extern void alignment_exception(struct pt_regs *regs);
+extern void program_check_exception(struct pt_regs *regs);
+extern void single_step_exception(struct pt_regs *regs);
 extern int do_signal(sigset_t *, struct pt_regs *);
 extern int pmac_newworld;
 extern int sys_sigreturn(struct pt_regs *regs);
@@ -72,10 +72,10 @@ EXPORT_SYMBOL(clear_user_page);
 EXPORT_SYMBOL(do_signal);
 EXPORT_SYMBOL(transfer_to_handler);
 EXPORT_SYMBOL(do_IRQ);
-EXPORT_SYMBOL(MachineCheckException);
-EXPORT_SYMBOL(AlignmentException);
-EXPORT_SYMBOL(ProgramCheckException);
-EXPORT_SYMBOL(SingleStepException);
+EXPORT_SYMBOL(machine_check_exception);
+EXPORT_SYMBOL(alignment_exception);
+EXPORT_SYMBOL(program_check_exception);
+EXPORT_SYMBOL(single_step_exception);
 EXPORT_SYMBOL(sys_sigreturn);
 EXPORT_SYMBOL(ppc_n_lost_interrupts);
 EXPORT_SYMBOL(ppc_lost_interrupts);
@@ -230,9 +230,6 @@ EXPORT_SYMBOL(find_all_nodes);
 EXPORT_SYMBOL(get_property);
 EXPORT_SYMBOL(request_OF_resource);
 EXPORT_SYMBOL(release_OF_resource);
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-EXPORT_SYMBOL(pci_device_to_OF_node);
-EXPORT_SYMBOL(pci_device_from_OF_node);
 EXPORT_SYMBOL(of_find_node_by_name);
 EXPORT_SYMBOL(of_find_node_by_type);
 EXPORT_SYMBOL(of_find_compatible_node);
@@ -272,16 +269,6 @@ EXPORT_SYMBOL(screen_info);
 #endif
 
 EXPORT_SYMBOL(__delay);
-#ifndef INLINE_IRQS
-EXPORT_SYMBOL(local_irq_enable);
-EXPORT_SYMBOL(local_irq_enable_end);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_disable_end);
-EXPORT_SYMBOL(local_save_flags_ptr);
-EXPORT_SYMBOL(local_save_flags_ptr_end);
-EXPORT_SYMBOL(local_irq_restore);
-EXPORT_SYMBOL(local_irq_restore_end);
-#endif
 EXPORT_SYMBOL(timer_interrupt);
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -336,10 +323,6 @@ extern long *intercept_table;
 EXPORT_SYMBOL(intercept_table);
 #endif /* CONFIG_PPC_STD_MMU */
 EXPORT_SYMBOL(cur_cpu_spec);
-#ifdef CONFIG_PPC_PMAC
-extern unsigned long agp_special_page;
-EXPORT_SYMBOL(agp_special_page);
-#endif
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 EXPORT_SYMBOL(__mtdcr);
 EXPORT_SYMBOL(__mfdcr);
index 82de66e..6d60c40 100644 (file)
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
 }
 #endif /* defined(CHECK_STACK) */
 
-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
 {
-       if (regs->msr & MSR_VEC)
-               giveup_altivec(current);
-       memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+       if (tsk->thread.regs) {
+               /*
+                * We need to disable preemption here because if we didn't,
+                * another process could get scheduled after the regs->msr
+                * test but before we have finished saving the FP registers
+                * to the thread_struct.  That process could take over the
+                * FPU, and then when we get scheduled again we would store
+                * bogus values for the remaining FP registers.
+                */
+               preempt_disable();
+               if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+                       /*
+                        * This should only ever be called for current or
+                        * for a stopped child process.  Since we save away
+                        * the FP register state on context switch on SMP,
+                        * there is something wrong if a stopped child appears
+                        * to still have its FP state in the CPU registers.
+                        */
+                       BUG_ON(tsk != current);
+#endif
+                       giveup_fpu(current);
+               }
+               preempt_enable();
+       }
+}
+
+void enable_kernel_fp(void)
+{
+       WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+       if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+               giveup_fpu(current);
+       else
+               giveup_fpu(NULL);       /* just enables FP for kernel */
+#else
+       giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+       preempt_disable();
+       if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+               giveup_fpu(tsk);
+       preempt_enable();
+       memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
        return 1;
 }
 
-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
 
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */
 
-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
 {
-       if (regs->msr & MSR_SPE)
-               giveup_spe(current);
-       /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+       if (tsk->thread.regs) {
+               preempt_disable();
+               if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+                       BUG_ON(tsk != current);
+#endif
+                       giveup_altivec(current);
+               }
+               preempt_enable();
+       }
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+       if (regs->msr & MSR_VEC)
+               giveup_altivec(current);
+       memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
        return 1;
 }
+#endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
 void
 enable_kernel_spe(void)
 {
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */
 
-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
 {
-       WARN_ON(preemptible());
-
+       if (tsk->thread.regs) {
+               preempt_disable();
+               if (tsk->thread.regs->msr & MSR_SPE) {
 #ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu(current);
-       else
-               giveup_fpu(NULL);       /* just enables FP for kernel */
-#else
-       giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+                       BUG_ON(tsk != current);
+#endif
+                       giveup_spe(current);
+               }
+               preempt_enable();
+       }
 }
-EXPORT_SYMBOL(enable_kernel_fp);
 
-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 {
-       preempt_disable();
-       if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-               giveup_fpu(tsk);
-       preempt_enable();
-       memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+       if (regs->msr & MSR_SPE)
+               giveup_spe(current);
+       /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
        return 1;
 }
+#endif /* CONFIG_SPE */
 
 struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
@@ -557,14 +617,16 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
 }
 
-int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+            unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
 {
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
 }
 
-int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+             unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
 {
        CHECK_FULL_REGS(regs);
index 545cfd0..6bcb85d 100644 (file)
@@ -71,7 +71,8 @@ struct ide_machdep_calls ppc_ide_md;
 unsigned long boot_mem_size;
 
 unsigned long ISA_DMA_THRESHOLD;
-unsigned long DMA_MODE_READ, DMA_MODE_WRITE;
+unsigned int DMA_MODE_READ;
+unsigned int DMA_MODE_WRITE;
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
 int _machine = 0;
@@ -82,8 +83,18 @@ extern void pmac_init(unsigned long r3, unsigned long r4,
                unsigned long r5, unsigned long r6, unsigned long r7);
 extern void chrp_init(unsigned long r3, unsigned long r4,
                unsigned long r5, unsigned long r6, unsigned long r7);
+
+dev_t boot_dev;
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
+int have_of;
+EXPORT_SYMBOL(have_of);
+
+#ifdef __DO_IRQ_CANON
+int ppc_do_canonicalize_irqs;
+EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
+#endif
+
 #ifdef CONFIG_MAGIC_SYSRQ
 unsigned long SYSRQ_KEY = 0x54;
 #endif /* CONFIG_MAGIC_SYSRQ */
@@ -185,18 +196,18 @@ int show_cpuinfo(struct seq_file *m, void *v)
        seq_printf(m, "processor\t: %d\n", i);
        seq_printf(m, "cpu\t\t: ");
 
-       if (cur_cpu_spec[i]->pvr_mask)
-               seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name);
+       if (cur_cpu_spec->pvr_mask)
+               seq_printf(m, "%s", cur_cpu_spec->cpu_name);
        else
                seq_printf(m, "unknown (%08x)", pvr);
 #ifdef CONFIG_ALTIVEC
-       if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC)
+       if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
                seq_printf(m, ", altivec supported");
 #endif
        seq_printf(m, "\n");
 
 #ifdef CONFIG_TAU
-       if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) {
+       if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
 #ifdef CONFIG_TAU_AVERAGE
                /* more straightforward, but potentially misleading */
                seq_printf(m,  "temperature \t: %u C (uncalibrated)\n",
@@ -339,7 +350,7 @@ early_init(int r3, int r4, int r5)
  * Assume here that all clock rates are the same in a
  * smp system.  -- Cort
  */
-int __openfirmware
+int
 of_show_percpuinfo(struct seq_file *m, int i)
 {
        struct device_node *cpu_node;
@@ -404,11 +415,15 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                        _machine = _MACH_prep;
        }
 
+#ifdef CONFIG_PPC_PREP
        /* not much more to do here, if prep */
        if (_machine == _MACH_prep) {
                prep_init(r3, r4, r5, r6, r7);
                return;
        }
+#endif
+
+       have_of = 1;
 
        /* prom_init has already been called from __start */
        if (boot_infos)
@@ -479,12 +494,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
 #endif /* CONFIG_ADB */
 
        switch (_machine) {
+#ifdef CONFIG_PPC_PMAC
        case _MACH_Pmac:
                pmac_init(r3, r4, r5, r6, r7);
                break;
+#endif
+#ifdef CONFIG_PPC_CHRP
        case _MACH_chrp:
                chrp_init(r3, r4, r5, r6, r7);
                break;
+#endif
        }
 }
 
@@ -721,7 +740,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifdef CONFIG_XMON
-       xmon_map_scc();
+       xmon_init(1);
        if (strstr(cmd_line, "xmon"))
                xmon(NULL);
 #endif /* CONFIG_XMON */
@@ -745,12 +764,12 @@ void __init setup_arch(char **cmdline_p)
         * for a possibly more accurate value.
         */
        if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
-               dcache_bsize = cur_cpu_spec[0]->dcache_bsize;
-               icache_bsize = cur_cpu_spec[0]->icache_bsize;
+               dcache_bsize = cur_cpu_spec->dcache_bsize;
+               icache_bsize = cur_cpu_spec->icache_bsize;
                ucache_bsize = 0;
        } else
                ucache_bsize = dcache_bsize = icache_bsize
-                       = cur_cpu_spec[0]->dcache_bsize;
+                       = cur_cpu_spec->dcache_bsize;
 
        /* reboot on panic */
        panic_timeout = 180;
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
deleted file mode 100644 (file)
index 2244bf9..0000000
+++ /dev/null
@@ -1,771 +0,0 @@
-/*
- *  arch/ppc/kernel/signal.c
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/i386/kernel/signal.c"
- *    Copyright (C) 1991, 1992 Linus Torvalds
- *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/elf.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/suspend.h>
-#include <asm/ucontext.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-
-#undef DEBUG_SIG
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-extern void sigreturn_exit(struct pt_regs *);
-
-#define GP_REGS_SIZE   min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
-
-int do_signal(sigset_t *oldset, struct pt_regs *regs);
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-int
-sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
-              struct pt_regs *regs)
-{
-       sigset_t saveset;
-
-       mask &= _BLOCKABLE;
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       siginitset(&current->blocked, mask);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset, regs))
-                       sigreturn_exit(regs);
-       }
-}
-
-int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
-                 int p6, int p7, struct pt_regs *regs)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset, regs))
-                       sigreturn_exit(regs);
-       }
-}
-
-
-int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
-               int r6, int r7, int r8, struct pt_regs *regs)
-{
-       return do_sigaltstack(uss, uoss, regs->gpr[1]);
-}
-
-int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-             struct old_sigaction __user *oact)
-{
-       struct k_sigaction new_ka, old_ka;
-       int ret;
-
-       if (act) {
-               old_sigset_t mask;
-               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-                       return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
-               siginitset(&new_ka.sa.sa_mask, mask);
-       }
-
-       ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
-
-       if (!ret && oact) {
-               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-                       return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-       }
-
-       return ret;
-}
-
-/*
- * When we have signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- *     a sigregs struct
- *     a sigcontext struct
- *     a gap of __SIGNAL_FRAMESIZE bytes
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct sigregs {
-       struct mcontext mctx;           /* all the register values */
-       /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
-          and 18 fp regs below sp before decrementing it. */
-       int             abigap[56];
-};
-
-/* We use the mc_pad field for the signal return trampoline. */
-#define tramp  mc_pad
-
-/*
- *  When we have rt signals to deliver, we set up on the
- *  user stack, going down from the original stack pointer:
- *     one rt_sigframe struct (siginfo + ucontext + ABI gap)
- *     a gap of __SIGNAL_FRAMESIZE+16 bytes
- *  (the +16 is to get the siginfo and ucontext in the same
- *  positions as in older kernels).
- *
- *  Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct rt_sigframe
-{
-       struct siginfo info;
-       struct ucontext uc;
-       /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
-          and 18 fp regs below sp before decrementing it. */
-       int             abigap[56];
-};
-
-/*
- * Save the current user registers on the user stack.
- * We only save the altivec/spe registers if the process has used
- * altivec/spe instructions at some point.
- */
-static int
-save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
-{
-       /* save general and floating-point registers */
-       CHECK_FULL_REGS(regs);
-       preempt_disable();
-       if (regs->msr & MSR_FP)
-               giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-       if (current->thread.used_vr && (regs->msr & MSR_VEC))
-               giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-       if (current->thread.used_spe && (regs->msr & MSR_SPE))
-               giveup_spe(current);
-#endif /* CONFIG_ALTIVEC */
-       preempt_enable();
-
-       if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
-           || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
-                             ELF_NFPREG * sizeof(double)))
-               return 1;
-
-       current->thread.fpscr = 0;      /* turn off all fp exceptions */
-
-#ifdef CONFIG_ALTIVEC
-       /* save altivec registers */
-       if (current->thread.used_vr) {
-               if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
-                                  ELF_NVRREG * sizeof(vector128)))
-                       return 1;
-               /* set MSR_VEC in the saved MSR value to indicate that
-                  frame->mc_vregs contains valid data */
-               if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
-                       return 1;
-       }
-       /* else assert((regs->msr & MSR_VEC) == 0) */
-
-       /* We always copy to/from vrsave, it's 0 if we don't have or don't
-        * use altivec. Since VSCR only contains 32 bits saved in the least
-        * significant bits of a vector, we "cheat" and stuff VRSAVE in the
-        * most significant bits of that same vector. --BenH
-        */
-       if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
-               return 1;
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-       /* save spe registers */
-       if (current->thread.used_spe) {
-               if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
-                                  ELF_NEVRREG * sizeof(u32)))
-                       return 1;
-               /* set MSR_SPE in the saved MSR value to indicate that
-                  frame->mc_vregs contains valid data */
-               if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
-                       return 1;
-       }
-       /* else assert((regs->msr & MSR_SPE) == 0) */
-
-       /* We always copy to/from spefscr */
-       if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
-               return 1;
-#endif /* CONFIG_SPE */
-
-       if (sigret) {
-               /* Set up the sigreturn trampoline: li r0,sigret; sc */
-               if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
-                   || __put_user(0x44000002UL, &frame->tramp[1]))
-                       return 1;
-               flush_icache_range((unsigned long) &frame->tramp[0],
-                                  (unsigned long) &frame->tramp[2]);
-       }
-
-       return 0;
-}
-
-/*
- * Restore the current user register values from the user stack,
- * (except for MSR).
- */
-static int
-restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
-{
-       unsigned long save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
-       unsigned long msr;
-#endif
-
-       /* backup/restore the TLS as we don't want it to be modified */
-       if (!sig)
-               save_r2 = regs->gpr[2];
-       /* copy up to but not including MSR */
-       if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
-               return 1;
-       /* copy from orig_r3 (the word after the MSR) up to the end */
-       if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
-                            GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
-               return 1;
-       if (!sig)
-               regs->gpr[2] = save_r2;
-
-       /* force the process to reload the FP registers from
-          current->thread when it next does FP instructions */
-       regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
-       if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
-                            sizeof(sr->mc_fregs)))
-               return 1;
-
-#ifdef CONFIG_ALTIVEC
-       /* force the process to reload the altivec registers from
-          current->thread when it next does altivec instructions */
-       regs->msr &= ~MSR_VEC;
-       if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
-               /* restore altivec registers from the stack */
-               if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
-                                    sizeof(sr->mc_vregs)))
-                       return 1;
-       } else if (current->thread.used_vr)
-               memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
-
-       /* Always get VRSAVE back */
-       if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
-               return 1;
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-       /* force the process to reload the spe registers from
-          current->thread when it next does spe instructions */
-       regs->msr &= ~MSR_SPE;
-       if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
-               /* restore spe registers from the stack */
-               if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
-                                    ELF_NEVRREG * sizeof(u32)))
-                       return 1;
-       } else if (current->thread.used_spe)
-               memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
-
-       /* Always get SPEFSCR back */
-       if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
-               return 1;
-#endif /* CONFIG_SPE */
-
-#ifndef CONFIG_SMP
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
-       preempt_enable();
-#endif
-       return 0;
-}
-
-/*
- * Restore the user process's signal mask
- */
-static void
-restore_sigmask(sigset_t *set)
-{
-       sigdelsetmask(set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = *set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-}
-
-/*
- * Set up a signal frame for a "real-time" signal handler
- * (one which gets siginfo).
- */
-static void
-handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
-                siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
-                unsigned long newsp)
-{
-       struct rt_sigframe __user *rt_sf;
-       struct mcontext __user *frame;
-       unsigned long origsp = newsp;
-
-       /* Set up Signal Frame */
-       /* Put a Real Time Context onto stack */
-       newsp -= sizeof(*rt_sf);
-       rt_sf = (struct rt_sigframe __user *) newsp;
-
-       /* create a stack frame for the caller of the handler */
-       newsp -= __SIGNAL_FRAMESIZE + 16;
-
-       if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
-               goto badframe;
-
-       /* Put the siginfo & fill in most of the ucontext */
-       if (copy_siginfo_to_user(&rt_sf->info, info)
-           || __put_user(0, &rt_sf->uc.uc_flags)
-           || __put_user(0, &rt_sf->uc.uc_link)
-           || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
-           || __put_user(sas_ss_flags(regs->gpr[1]),
-                         &rt_sf->uc.uc_stack.ss_flags)
-           || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
-           || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
-           || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
-               goto badframe;
-
-       /* Save user registers on the stack */
-       frame = &rt_sf->uc.uc_mcontext;
-       if (save_user_regs(regs, frame, __NR_rt_sigreturn))
-               goto badframe;
-
-       if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
-               goto badframe;
-       regs->gpr[1] = newsp;
-       regs->gpr[3] = sig;
-       regs->gpr[4] = (unsigned long) &rt_sf->info;
-       regs->gpr[5] = (unsigned long) &rt_sf->uc;
-       regs->gpr[6] = (unsigned long) rt_sf;
-       regs->nip = (unsigned long) ka->sa.sa_handler;
-       regs->link = (unsigned long) frame->tramp;
-       regs->trap = 0;
-
-       return;
-
-badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
-       force_sigsegv(sig, current);
-}
-
-static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
-{
-       sigset_t set;
-       struct mcontext __user *mcp;
-
-       if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
-           || __get_user(mcp, &ucp->uc_regs))
-               return -EFAULT;
-       restore_sigmask(&set);
-       if (restore_user_regs(regs, mcp, sig))
-               return -EFAULT;
-
-       return 0;
-}
-
-int sys_swapcontext(struct ucontext __user *old_ctx,
-                   struct ucontext __user *new_ctx,
-                   int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
-{
-       unsigned char tmp;
-
-       /* Context size is for future use. Right now, we only make sure
-        * we are passed something we understand
-        */
-       if (ctx_size < sizeof(struct ucontext))
-               return -EINVAL;
-
-       if (old_ctx != NULL) {
-               if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
-                   || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
-                   || __copy_to_user(&old_ctx->uc_sigmask,
-                                     &current->blocked, sizeof(sigset_t))
-                   || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
-                       return -EFAULT;
-       }
-       if (new_ctx == NULL)
-               return 0;
-       if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
-           || __get_user(tmp, (u8 __user *) new_ctx)
-           || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
-               return -EFAULT;
-
-       /*
-        * If we get a fault copying the context into the kernel's
-        * image of the user's registers, we can't just return -EFAULT
-        * because the user's registers will be corrupted.  For instance
-        * the NIP value may have been updated but not some of the
-        * other registers.  Given that we have done the access_ok
-        * and successfully read the first and last bytes of the region
-        * above, this should only happen in an out-of-memory situation
-        * or if another thread unmaps the region containing the context.
-        * We kill the task with a SIGSEGV in this situation.
-        */
-       if (do_setcontext(new_ctx, regs, 0))
-               do_exit(SIGSEGV);
-       sigreturn_exit(regs);
-       /* doesn't actually return back to here */
-       return 0;
-}
-
-int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
-                    struct pt_regs *regs)
-{
-       struct rt_sigframe __user *rt_sf;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-       rt_sf = (struct rt_sigframe __user *)
-               (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
-       if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
-               goto bad;
-       if (do_setcontext(&rt_sf->uc, regs, 1))
-               goto bad;
-
-       /*
-        * It's not clear whether or why it is desirable to save the
-        * sigaltstack setting on signal delivery and restore it on
-        * signal return.  But other architectures do this and we have
-        * always done it up until now so it is probably better not to
-        * change it.  -- paulus
-        */
-       do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
-
-       sigreturn_exit(regs);           /* doesn't return here */
-       return 0;
-
- bad:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-int sys_debug_setcontext(struct ucontext __user *ctx,
-                        int ndbg, struct sig_dbg_op __user *dbg,
-                        int r6, int r7, int r8,
-                        struct pt_regs *regs)
-{
-       struct sig_dbg_op op;
-       int i;
-       unsigned long new_msr = regs->msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       unsigned long new_dbcr0 = current->thread.dbcr0;
-#endif
-
-       for (i=0; i<ndbg; i++) {
-               if (__copy_from_user(&op, dbg, sizeof(op)))
-                       return -EFAULT;
-               switch (op.dbg_type) {
-               case SIG_DBG_SINGLE_STEPPING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-                       if (op.dbg_value) {
-                               new_msr |= MSR_DE;
-                               new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
-                       } else {
-                               new_msr &= ~MSR_DE;
-                               new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
-                       }
-#else
-                       if (op.dbg_value)
-                               new_msr |= MSR_SE;
-                       else
-                               new_msr &= ~MSR_SE;
-#endif
-                       break;
-               case SIG_DBG_BRANCH_TRACING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-                       return -EINVAL;
-#else
-                       if (op.dbg_value)
-                               new_msr |= MSR_BE;
-                       else
-                               new_msr &= ~MSR_BE;
-#endif
-                       break;
-
-               default:
-                       return -EINVAL;
-               }
-       }
-
-       /* We wait until here to actually install the values in the
-          registers so if we fail in the above loop, it will not
-          affect the contents of these registers.  After this point,
-          failure is a problem, anyway, and it's very unlikely unless
-          the user is really doing something wrong. */
-       regs->msr = new_msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       current->thread.dbcr0 = new_dbcr0;
-#endif
-
-       /*
-        * If we get a fault copying the context into the kernel's
-        * image of the user's registers, we can't just return -EFAULT
-        * because the user's registers will be corrupted.  For instance
-        * the NIP value may have been updated but not some of the
-        * other registers.  Given that we have done the access_ok
-        * and successfully read the first and last bytes of the region
-        * above, this should only happen in an out-of-memory situation
-        * or if another thread unmaps the region containing the context.
-        * We kill the task with a SIGSEGV in this situation.
-        */
-       if (do_setcontext(ctx, regs, 1)) {
-               force_sig(SIGSEGV, current);
-               goto out;
-       }
-
-       /*
-        * It's not clear whether or why it is desirable to save the
-        * sigaltstack setting on signal delivery and restore it on
-        * signal return.  But other architectures do this and we have
-        * always done it up until now so it is probably better not to
-        * change it.  -- paulus
-        */
-       do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
-
-       sigreturn_exit(regs);
-       /* doesn't actually return back to here */
-
- out:
-       return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(unsigned long sig, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
-             unsigned long newsp)
-{
-       struct sigcontext __user *sc;
-       struct sigregs __user *frame;
-       unsigned long origsp = newsp;
-
-       /* Set up Signal Frame */
-       newsp -= sizeof(struct sigregs);
-       frame = (struct sigregs __user *) newsp;
-
-       /* Put a sigcontext on the stack */
-       newsp -= sizeof(*sc);
-       sc = (struct sigcontext __user *) newsp;
-
-       /* create a stack frame for the caller of the handler */
-       newsp -= __SIGNAL_FRAMESIZE;
-
-       if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
-               goto badframe;
-
-#if _NSIG != 64
-#error "Please adjust handle_signal()"
-#endif
-       if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
-           || __put_user(oldset->sig[0], &sc->oldmask)
-           || __put_user(oldset->sig[1], &sc->_unused[3])
-           || __put_user((struct pt_regs __user *)frame, &sc->regs)
-           || __put_user(sig, &sc->signal))
-               goto badframe;
-
-       if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
-               goto badframe;
-
-       if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
-               goto badframe;
-       regs->gpr[1] = newsp;
-       regs->gpr[3] = sig;
-       regs->gpr[4] = (unsigned long) sc;
-       regs->nip = (unsigned long) ka->sa.sa_handler;
-       regs->link = (unsigned long) frame->mctx.tramp;
-       regs->trap = 0;
-
-       return;
-
-badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
-       force_sigsegv(sig, current);
-}
-
-/*
- * Do a signal return; undo the signal stack.
- */
-int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
-                 struct pt_regs *regs)
-{
-       struct sigcontext __user *sc;
-       struct sigcontext sigctx;
-       struct mcontext __user *sr;
-       sigset_t set;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-       sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
-       if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
-               goto badframe;
-
-       set.sig[0] = sigctx.oldmask;
-       set.sig[1] = sigctx._unused[3];
-       restore_sigmask(&set);
-
-       sr = (struct mcontext __user *) sigctx.regs;
-       if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
-           || restore_user_regs(regs, sr, 1))
-               goto badframe;
-
-       sigreturn_exit(regs);           /* doesn't return */
-       return 0;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
-{
-       siginfo_t info;
-       struct k_sigaction ka;
-       unsigned long frame, newsp;
-       int signr, ret;
-
-       if (try_to_freeze()) {
-               signr = 0;
-               if (!signal_pending(current))
-                       goto no_signal;
-       }
-
-       if (!oldset)
-               oldset = &current->blocked;
-
-       newsp = frame = 0;
-
-       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- no_signal:
-       if (TRAP(regs) == 0x0C00                /* System Call! */
-           && regs->ccr & 0x10000000           /* error signalled */
-           && ((ret = regs->gpr[3]) == ERESTARTSYS
-               || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
-               || ret == ERESTART_RESTARTBLOCK)) {
-
-               if (signr > 0
-                   && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
-                       || (ret == ERESTARTSYS
-                           && !(ka.sa.sa_flags & SA_RESTART)))) {
-                       /* make the system call return an EINTR error */
-                       regs->result = -EINTR;
-                       regs->gpr[3] = EINTR;
-                       /* note that the cr0.SO bit is already set */
-               } else {
-                       regs->nip -= 4; /* Back up & retry system call */
-                       regs->result = 0;
-                       regs->trap = 0;
-                       if (ret == ERESTART_RESTARTBLOCK)
-                               regs->gpr[0] = __NR_restart_syscall;
-                       else
-                               regs->gpr[3] = regs->orig_gpr3;
-               }
-       }
-
-       if (signr == 0)
-               return 0;               /* no signals delivered */
-
-       if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
-           && !on_sig_stack(regs->gpr[1]))
-               newsp = current->sas_ss_sp + current->sas_ss_size;
-       else
-               newsp = regs->gpr[1];
-       newsp &= ~0xfUL;
-
-       /* Whee!  Actually deliver the signal.  */
-       if (ka.sa.sa_flags & SA_SIGINFO)
-               handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
-       else
-               handle_signal(signr, &ka, &info, oldset, regs, newsp);
-
-       spin_lock_irq(&current->sighand->siglock);
-       sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
-       if (!(ka.sa.sa_flags & SA_NODEFER))
-               sigaddset(&current->blocked, signr);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       return 1;
-}
-
index 726fe7c..bc5bf11 100644 (file)
 #include <asm/thread_info.h>
 #include <asm/tlbflush.h>
 #include <asm/xmon.h>
+#include <asm/machdep.h>
 
 volatile int smp_commenced;
 int smp_tb_synchronized;
 struct cpuinfo_PPC cpu_data[NR_CPUS];
-struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
 atomic_t ipi_recv;
 atomic_t ipi_sent;
 cpumask_t cpu_online_map;
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
 
 /* SMP operations for this machine */
-static struct smp_ops_t *smp_ops;
+struct smp_ops_t *smp_ops;
 
 /* all cpu mappings are 1-1 -- Cort */
 volatile unsigned long cpu_callin_map[NR_CPUS];
@@ -74,11 +74,11 @@ extern void __save_cpu_setup(void);
 #define PPC_MSG_XMON_BREAK     3
 
 static inline void
-smp_message_pass(int target, int msg, unsigned long data, int wait)
+smp_message_pass(int target, int msg)
 {
-       if (smp_ops){
+       if (smp_ops) {
                atomic_inc(&ipi_sent);
-               smp_ops->message_pass(target,msg,data,wait);
+               smp_ops->message_pass(target, msg);
        }
 }
 
@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
 void smp_send_tlb_invalidate(int cpu)
 {
        if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
-               smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0);
+               smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
 }
 
 void smp_send_reschedule(int cpu)
@@ -135,13 +135,13 @@ void smp_send_reschedule(int cpu)
         */
        /* This is only used if `cpu' is running an idle task,
           so it will reschedule itself anyway... */
-       smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
+       smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
 #ifdef CONFIG_XMON
 void smp_send_xmon_break(int cpu)
 {
-       smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0);
+       smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
 }
 #endif /* CONFIG_XMON */
 
@@ -224,7 +224,7 @@ static int __smp_call_function(void (*func) (void *info), void *info,
        spin_lock(&call_lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
-       smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0);
+       smp_message_pass(target, PPC_MSG_CALL_FUNCTION);
 
        /* Wait for response */
        timeout = 1000000;
@@ -294,7 +294,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         smp_store_cpu_info(smp_processor_id());
        cpu_callin_map[smp_processor_id()] = 1;
 
-       smp_ops = ppc_md.smp_ops;
        if (smp_ops == NULL) {
                printk("SMP not supported on this machine.\n");
                return;
@@ -308,9 +307,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        /* Backup CPU 0 state */
        __save_cpu_setup();
 
-       if (smp_ops->space_timers)
-               smp_ops->space_timers(num_cpus);
-
        for_each_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
deleted file mode 100644 (file)
index 127f040..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * arch/ppc/kernel/sys_ppc.c
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/i386/kernel/sys_i386.c"
- * Adapted from the i386 version by Gary Thomas
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- * and Paul Mackerras (paulus@cs.anu.edu.au).
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/PPC
- * platform.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/sys.h>
-#include <linux/ipc.h>
-#include <linux/utsname.h>
-#include <linux/file.h>
-#include <linux/unistd.h>
-
-#include <asm/uaccess.h>
-#include <asm/ipc.h>
-#include <asm/semaphore.h>
-
-
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-int
-sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
-{
-       int version, ret;
-
-       version = call >> 16; /* hack for backward compatibility */
-       call &= 0xffff;
-
-       ret = -ENOSYS;
-       switch (call) {
-       case SEMOP:
-               ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
-                                     second, NULL);
-               break;
-       case SEMTIMEDOP:
-               ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
-                                     second, (const struct timespec __user *) fifth);
-               break;
-       case SEMGET:
-               ret = sys_semget (first, second, third);
-               break;
-       case SEMCTL: {
-               union semun fourth;
-
-               if (!ptr)
-                       break;
-               if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
-                   || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
-                       break;
-               ret = sys_semctl (first, second, third, fourth);
-               break;
-               }
-       case MSGSND:
-               ret = sys_msgsnd (first, (struct msgbuf __user *) ptr, second, third);
-               break;
-       case MSGRCV:
-               switch (version) {
-               case 0: {
-                       struct ipc_kludge tmp;
-
-                       if (!ptr)
-                               break;
-                       if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
-                           || (ret = copy_from_user(&tmp,
-                                       (struct ipc_kludge __user *) ptr,
-                                       sizeof (tmp)) ? -EFAULT : 0))
-                               break;
-                       ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
-                                         third);
-                       break;
-                       }
-               default:
-                       ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
-                                         second, fifth, third);
-                       break;
-               }
-               break;
-       case MSGGET:
-               ret = sys_msgget ((key_t) first, second);
-               break;
-       case MSGCTL:
-               ret = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
-               break;
-       case SHMAT: {
-               ulong raddr;
-
-               if ((ret = access_ok(VERIFY_WRITE, (ulong __user *) third,
-                                      sizeof(ulong)) ? 0 : -EFAULT))
-                       break;
-               ret = do_shmat (first, (char __user *) ptr, second, &raddr);
-               if (ret)
-                       break;
-               ret = put_user (raddr, (ulong __user *) third);
-               break;
-               }
-       case SHMDT:
-               ret = sys_shmdt ((char __user *)ptr);
-               break;
-       case SHMGET:
-               ret = sys_shmget (first, second, third);
-               break;
-       case SHMCTL:
-               ret = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
-               break;
-       }
-
-       return ret;
-}
-
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way unix traditionally does this, though.
- */
-int sys_pipe(int __user *fildes)
-{
-       int fd[2];
-       int error;
-
-       error = do_pipe(fd);
-       if (!error) {
-               if (copy_to_user(fildes, fd, 2*sizeof(int)))
-                       error = -EFAULT;
-       }
-       return error;
-}
-
-static inline unsigned long
-do_mmap2(unsigned long addr, size_t len,
-        unsigned long prot, unsigned long flags,
-        unsigned long fd, unsigned long pgoff)
-{
-       struct file * file = NULL;
-       int ret = -EBADF;
-
-       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-       if (!(flags & MAP_ANONYMOUS)) {
-               if (!(file = fget(fd)))
-                       goto out;
-       }
-
-       down_write(&current->mm->mmap_sem);
-       ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-       if (file)
-               fput(file);
-out:
-       return ret;
-}
-
-unsigned long sys_mmap2(unsigned long addr, size_t len,
-                       unsigned long prot, unsigned long flags,
-                       unsigned long fd, unsigned long pgoff)
-{
-       return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
-unsigned long sys_mmap(unsigned long addr, size_t len,
-                      unsigned long prot, unsigned long flags,
-                      unsigned long fd, off_t offset)
-{
-       int err = -EINVAL;
-
-       if (offset & ~PAGE_MASK)
-               goto out;
-
-       err = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
-out:
-       return err;
-}
-
-/*
- * Due to some executables calling the wrong select we sometimes
- * get wrong args.  This determines how the args are being passed
- * (a single ptr to them all args passed) then calls
- * sys_select() with the appropriate args. -- Cort
- */
-int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
-{
-       if ( (unsigned long)n >= 4096 )
-       {
-               unsigned long __user *buffer = (unsigned long __user *)n;
-               if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
-                   || __get_user(n, buffer)
-                   || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
-                   || __get_user(outp, ((fd_set  __user * __user *)(buffer+2)))
-                   || __get_user(exp, ((fd_set  __user * __user *)(buffer+3)))
-                   || __get_user(tvp, ((struct timeval  __user * __user *)(buffer+4))))
-                       return -EFAULT;
-       }
-       return sys_select(n, inp, outp, exp, tvp);
-}
-
-int sys_uname(struct old_utsname __user * name)
-{
-       int err = -EFAULT;
-
-       down_read(&uts_sem);
-       if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
-               err = 0;
-       up_read(&uts_sem);
-       return err;
-}
-
-int sys_olduname(struct oldold_utsname __user * name)
-{
-       int error;
-
-       if (!name)
-               return -EFAULT;
-       if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
-               return -EFAULT;
-
-       down_read(&uts_sem);
-       error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
-       error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
-       error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
-       error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
-       error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
-       error -= __put_user(0,name->release+__OLD_UTS_LEN);
-       error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
-       error -= __put_user(0,name->version+__OLD_UTS_LEN);
-       error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
-       error = __put_user(0,name->machine+__OLD_UTS_LEN);
-       up_read(&uts_sem);
-
-       error = error ? -EFAULT : 0;
-       return error;
-}
-
-/*
- * We put the arguments in a different order so we only use 6
- * registers for arguments, rather than 7 as sys_fadvise64_64 needs
- * (because `offset' goes in r5/r6).
- */
-long ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
-{
-       return sys_fadvise64_64(fd, offset, len, advice);
-}
index 22d7fd1..76f44ce 100644 (file)
@@ -121,6 +121,15 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
+void wakeup_decrementer(void)
+{
+       set_dec(tb_ticks_per_jiffy);
+       /* No currently-supported powerbook has a 601,
+        * so use get_tbl, not native
+        */
+       last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
+}
+
 /*
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
index 961ede8..5e4bf88 100644 (file)
 #ifdef CONFIG_PMAC_BACKLIGHT
 #include <asm/backlight.h>
 #endif
-#include <asm/perfmon.h>
+#include <asm/pmc.h>
 
 #ifdef CONFIG_XMON
+extern int xmon_bpt(struct pt_regs *regs);
+extern int xmon_sstep(struct pt_regs *regs);
+extern int xmon_iabr_match(struct pt_regs *regs);
+extern int xmon_dabr_match(struct pt_regs *regs);
+
 void (*debugger)(struct pt_regs *regs) = xmon;
 int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
 int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
@@ -74,7 +79,7 @@ void (*debugger_fault_handler)(struct pt_regs *regs);
 
 DEFINE_SPINLOCK(die_lock);
 
-void die(const char * str, struct pt_regs * fp, long err)
+int die(const char * str, struct pt_regs * fp, long err)
 {
        static int die_counter;
        int nl = 0;
@@ -232,7 +237,7 @@ platform_machine_check(struct pt_regs *regs)
 {
 }
 
-void MachineCheckException(struct pt_regs *regs)
+void machine_check_exception(struct pt_regs *regs)
 {
        unsigned long reason = get_mc_reason(regs);
 
@@ -393,14 +398,14 @@ void SMIException(struct pt_regs *regs)
 #endif
 }
 
-void UnknownException(struct pt_regs *regs)
+void unknown_exception(struct pt_regs *regs)
 {
        printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
               regs->nip, regs->msr, regs->trap, print_tainted());
        _exception(SIGTRAP, regs, 0, 0);
 }
 
-void InstructionBreakpoint(struct pt_regs *regs)
+void instruction_breakpoint_exception(struct pt_regs *regs)
 {
        if (debugger_iabr_match(regs))
                return;
@@ -575,7 +580,7 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 #define module_find_bug(x)     NULL
 #endif
 
-static struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
        struct bug_entry *bug;
 
@@ -622,7 +627,7 @@ int check_bug_trap(struct pt_regs *regs)
        return 0;
 }
 
-void ProgramCheckException(struct pt_regs *regs)
+void program_check_exception(struct pt_regs *regs)
 {
        unsigned int reason = get_reason(regs);
        extern int do_mathemu(struct pt_regs *regs);
@@ -701,7 +706,7 @@ void ProgramCheckException(struct pt_regs *regs)
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
 }
 
-void SingleStepException(struct pt_regs *regs)
+void single_step_exception(struct pt_regs *regs)
 {
        regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
        if (debugger_sstep(regs))
@@ -709,7 +714,7 @@ void SingleStepException(struct pt_regs *regs)
        _exception(SIGTRAP, regs, TRAP_TRACE, 0);
 }
 
-void AlignmentException(struct pt_regs *regs)
+void alignment_exception(struct pt_regs *regs)
 {
        int fixed;
 
@@ -814,7 +819,18 @@ void TAUException(struct pt_regs *regs)
 }
 #endif /* CONFIG_INT_TAU */
 
-void AltivecUnavailException(struct pt_regs *regs)
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+void kernel_fp_unavailable_exception(struct pt_regs *regs)
+{
+       regs->msr |= MSR_FP;
+       printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
+              current, regs->nip);
+}
+
+void altivec_unavailable_exception(struct pt_regs *regs)
 {
        static int kernel_altivec_count;
 
@@ -835,7 +851,7 @@ void AltivecUnavailException(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_ALTIVEC
-void AltivecAssistException(struct pt_regs *regs)
+void altivec_assist_exception(struct pt_regs *regs)
 {
        int err;
 
@@ -872,7 +888,7 @@ void AltivecAssistException(struct pt_regs *regs)
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_E500
-void PerformanceMonitorException(struct pt_regs *regs)
+void performance_monitor_exception(struct pt_regs *regs)
 {
        perf_irq(regs);
 }
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S
deleted file mode 100644 (file)
index 82a2134..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-#include <asm/ppc_asm.h>
-#include <asm/processor.h>
-
-/*
- * The routines below are in assembler so we can closely control the
- * usage of floating-point registers.  These routines must be called
- * with preempt disabled.
- */
-       .data
-fpzero:
-       .long   0
-fpone:
-       .long   0x3f800000      /* 1.0 in single-precision FP */
-fphalf:
-       .long   0x3f000000      /* 0.5 in single-precision FP */
-
-       .text
-/*
- * Internal routine to enable floating point and set FPSCR to 0.
- * Don't call it from C; it doesn't use the normal calling convention.
- */
-fpenable:
-       mfmsr   r10
-       ori     r11,r10,MSR_FP
-       mtmsr   r11
-       isync
-       stfd    fr0,24(r1)
-       stfd    fr1,16(r1)
-       stfd    fr31,8(r1)
-       lis     r11,fpzero@ha
-       mffs    fr31
-       lfs     fr1,fpzero@l(r11)
-       mtfsf   0xff,fr1
-       blr
-
-fpdisable:
-       mtfsf   0xff,fr31
-       lfd     fr31,8(r1)
-       lfd     fr1,16(r1)
-       lfd     fr0,24(r1)
-       mtmsr   r10
-       isync
-       blr
-
-/*
- * Vector add, floating point.
- */
-       .globl  vaddfp
-vaddfp:
-       stwu    r1,-32(r1)
-       mflr    r0
-       stw     r0,36(r1)
-       bl      fpenable
-       li      r0,4
-       mtctr   r0
-       li      r6,0
-1:     lfsx    fr0,r4,r6
-       lfsx    fr1,r5,r6
-       fadds   fr0,fr0,fr1
-       stfsx   fr0,r3,r6
-       addi    r6,r6,4
-       bdnz    1b
-       bl      fpdisable
-       lwz     r0,36(r1)
-       mtlr    r0
-       addi    r1,r1,32
-       blr
-
-/*
- * Vector subtract, floating point.
- */
-       .globl  vsubfp
-vsubfp:
-       stwu    r1,-32(r1)
-       mflr    r0
-       stw     r0,36(r1)
-       bl      fpenable
-       li      r0,4
-       mtctr   r0
-       li      r6,0
-1:     lfsx    fr0,r4,r6
-       lfsx    fr1,r5,r6
-       fsubs   fr0,fr0,fr1
-       stfsx   fr0,r3,r6
-       addi    r6,r6,4
-       bdnz    1b
-       bl      fpdisable
-       lwz     r0,36(r1)
-       mtlr    r0
-       addi    r1,r1,32
-       blr
-
-/*
- * Vector multiply and add, floating point.
- */
-       .globl  vmaddfp
-vmaddfp:
-       stwu    r1,-48(r1)
-       mflr    r0
-       stw     r0,52(r1)
-       bl      fpenable
-       stfd    fr2,32(r1)
-       li      r0,4
-       mtctr   r0
-       li      r7,0
-1:     lfsx    fr0,r4,r7
-       lfsx    fr1,r5,r7
-       lfsx    fr2,r6,r7
-       fmadds  fr0,fr0,fr2,fr1
-       stfsx   fr0,r3,r7
-       addi    r7,r7,4
-       bdnz    1b
-       lfd     fr2,32(r1)
-       bl      fpdisable
-       lwz     r0,52(r1)
-       mtlr    r0
-       addi    r1,r1,48
-       blr
-
-/*
- * Vector negative multiply and subtract, floating point.
- */
-       .globl  vnmsubfp
-vnmsubfp:
-       stwu    r1,-48(r1)
-       mflr    r0
-       stw     r0,52(r1)
-       bl      fpenable
-       stfd    fr2,32(r1)
-       li      r0,4
-       mtctr   r0
-       li      r7,0
-1:     lfsx    fr0,r4,r7
-       lfsx    fr1,r5,r7
-       lfsx    fr2,r6,r7
-       fnmsubs fr0,fr0,fr2,fr1
-       stfsx   fr0,r3,r7
-       addi    r7,r7,4
-       bdnz    1b
-       lfd     fr2,32(r1)
-       bl      fpdisable
-       lwz     r0,52(r1)
-       mtlr    r0
-       addi    r1,r1,48
-       blr
-
-/*
- * Vector reciprocal estimate.  We just compute 1.0/x.
- * r3 -> destination, r4 -> source.
- */
-       .globl  vrefp
-vrefp:
-       stwu    r1,-32(r1)
-       mflr    r0
-       stw     r0,36(r1)
-       bl      fpenable
-       lis     r9,fpone@ha
-       li      r0,4
-       lfs     fr1,fpone@l(r9)
-       mtctr   r0
-       li      r6,0
-1:     lfsx    fr0,r4,r6
-       fdivs   fr0,fr1,fr0
-       stfsx   fr0,r3,r6
-       addi    r6,r6,4
-       bdnz    1b
-       bl      fpdisable
-       lwz     r0,36(r1)
-       mtlr    r0
-       addi    r1,r1,32
-       blr
-
-/*
- * Vector reciprocal square-root estimate, floating point.
- * We use the frsqrte instruction for the initial estimate followed
- * by 2 iterations of Newton-Raphson to get sufficient accuracy.
- * r3 -> destination, r4 -> source.
- */
-       .globl  vrsqrtefp
-vrsqrtefp:
-       stwu    r1,-48(r1)
-       mflr    r0
-       stw     r0,52(r1)
-       bl      fpenable
-       stfd    fr2,32(r1)
-       stfd    fr3,40(r1)
-       stfd    fr4,48(r1)
-       stfd    fr5,56(r1)
-       lis     r9,fpone@ha
-       lis     r8,fphalf@ha
-       li      r0,4
-       lfs     fr4,fpone@l(r9)
-       lfs     fr5,fphalf@l(r8)
-       mtctr   r0
-       li      r6,0
-1:     lfsx    fr0,r4,r6
-       frsqrte fr1,fr0         /* r = frsqrte(s) */
-       fmuls   fr3,fr1,fr0     /* r * s */
-       fmuls   fr2,fr1,fr5     /* r * 0.5 */
-       fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
-       fmadds  fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
-       fmuls   fr3,fr1,fr0     /* r * s */
-       fmuls   fr2,fr1,fr5     /* r * 0.5 */
-       fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
-       fmadds  fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
-       stfsx   fr1,r3,r6
-       addi    r6,r6,4
-       bdnz    1b
-       lfd     fr5,56(r1)
-       lfd     fr4,48(r1)
-       lfd     fr3,40(r1)
-       lfd     fr2,32(r1)
-       bl      fpdisable
-       lwz     r0,36(r1)
-       mtlr    r0
-       addi    r1,r1,32
-       blr
index 17d2db7..09c6525 100644 (file)
@@ -149,32 +149,6 @@ SECTIONS
 
   . = ALIGN(4096);
   _sextratext = .;
-  __pmac_begin = .;
-  .pmac.text : { *(.pmac.text) }
-  .pmac.data : { *(.pmac.data) }
-  . = ALIGN(4096);
-  __pmac_end = .;
-
-  . = ALIGN(4096);
-  __prep_begin = .;
-  .prep.text : { *(.prep.text) }
-  .prep.data : { *(.prep.data) }
-  . = ALIGN(4096);
-  __prep_end = .;
-
-  . = ALIGN(4096);
-  __chrp_begin = .;
-  .chrp.text : { *(.chrp.text) }
-  .chrp.data : { *(.chrp.data) }
-  . = ALIGN(4096);
-  __chrp_end = .;
-
-  . = ALIGN(4096);
-  __openfirmware_begin = .;
-  .openfirmware.text : { *(.openfirmware.text) }
-  .openfirmware.data : { *(.openfirmware.data) }
-  . = ALIGN(4096);
-  __openfirmware_end = .;
   _eextratext = .;
 
   __bss_start = .;
index 36c9b97..2e258c4 100644 (file)
@@ -65,9 +65,9 @@
        .stabs  "arch/ppc/lib/",N_SO,0,0,0f
        .stabs  "string.S",N_SO,0,0,0f
 
-CACHELINE_BYTES = L1_CACHE_LINE_SIZE
-LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE
-CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1)
+CACHELINE_BYTES = L1_CACHE_BYTES
+LG_CACHELINE_BYTES = L1_CACHE_SHIFT
+CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
 _GLOBAL(strcpy)
        addi    r5,r3,-1
@@ -265,12 +265,12 @@ _GLOBAL(cacheable_memcpy)
        dcbz    r11,r6
 #endif
        COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 32
+#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 64
+#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES
        COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 128
+#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES
        COPY_16_BYTES
        COPY_16_BYTES
@@ -485,12 +485,12 @@ _GLOBAL(__copy_tofrom_user)
        .text
 /* the main body of the cacheline loop */
        COPY_16_BYTES_WITHEX(0)
-#if L1_CACHE_LINE_SIZE >= 32
+#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES_WITHEX(1)
-#if L1_CACHE_LINE_SIZE >= 64
+#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES_WITHEX(2)
        COPY_16_BYTES_WITHEX(3)
-#if L1_CACHE_LINE_SIZE >= 128
+#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES_WITHEX(4)
        COPY_16_BYTES_WITHEX(5)
        COPY_16_BYTES_WITHEX(6)
@@ -544,12 +544,12 @@ _GLOBAL(__copy_tofrom_user)
  * 104f (if in read part) or 105f (if in write part), after updating r5
  */
        COPY_16_BYTES_EXCODE(0)
-#if L1_CACHE_LINE_SIZE >= 32
+#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES_EXCODE(1)
-#if L1_CACHE_LINE_SIZE >= 64
+#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES_EXCODE(2)
        COPY_16_BYTES_EXCODE(3)
-#if L1_CACHE_LINE_SIZE >= 128
+#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES_EXCODE(4)
        COPY_16_BYTES_EXCODE(5)
        COPY_16_BYTES_EXCODE(6)
index f421a4b..db94efd 100644 (file)
@@ -69,15 +69,12 @@ int init_bootmem_done;
 int boot_mapsize;
 #ifdef CONFIG_PPC_PMAC
 unsigned long agp_special_page;
+EXPORT_SYMBOL(agp_special_page);
 #endif
 
 extern char _end[];
 extern char etext[], _stext[];
 extern char __init_begin, __init_end;
-extern char __prep_begin, __prep_end;
-extern char __chrp_begin, __chrp_end;
-extern char __pmac_begin, __pmac_end;
-extern char __openfirmware_begin, __openfirmware_end;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -167,14 +164,6 @@ void free_initmem(void)
 
        printk ("Freeing unused kernel memory:");
        FREESEC(init);
-       if (_machine != _MACH_Pmac)
-               FREESEC(pmac);
-       if (_machine != _MACH_chrp)
-               FREESEC(chrp);
-       if (_machine != _MACH_prep)
-               FREESEC(prep);
-       if (!have_of)
-               FREESEC(openfirmware);
        printk("\n");
        ppc_md.progress = NULL;
 #undef FREESEC
diff --git a/arch/ppc/oprofile/common.c b/arch/ppc/oprofile/common.c
deleted file mode 100644 (file)
index 3169c67..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * PPC 32 oprofile support
- * Based on PPC64 oprofile support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (C) Freescale Semiconductor, Inc 2004
- *
- * Author: Andy Fleming
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/perfmon.h>
-#include <asm/cputable.h>
-
-#include "op_impl.h"
-
-static struct op_ppc32_model *model;
-
-static struct op_counter_config ctr[OP_MAX_COUNTER];
-static struct op_system_config sys;
-
-static void op_handle_interrupt(struct pt_regs *regs)
-{
-       model->handle_interrupt(regs, ctr);
-}
-
-static int op_ppc32_setup(void)
-{
-       /* Install our interrupt handler into the existing hook.  */
-       if(request_perfmon_irq(&op_handle_interrupt))
-               return -EBUSY;
-
-       mb();
-
-       /* Pre-compute the values to stuff in the hardware registers.  */
-       model->reg_setup(ctr, &sys, model->num_counters);
-
-#if 0
-       /* FIXME: Make multi-cpu work */
-       /* Configure the registers on all cpus.  */
-       on_each_cpu(model->reg_setup, NULL, 0, 1);
-#endif
-
-       return 0;
-}
-
-static void op_ppc32_shutdown(void)
-{
-       mb();
-
-       /* Remove our interrupt handler. We may be removing this module. */
-       free_perfmon_irq();
-}
-
-static void op_ppc32_cpu_start(void *dummy)
-{
-       model->start(ctr);
-}
-
-static int op_ppc32_start(void)
-{
-       on_each_cpu(op_ppc32_cpu_start, NULL, 0, 1);
-       return 0;
-}
-
-static inline void op_ppc32_cpu_stop(void *dummy)
-{
-       model->stop();
-}
-
-static void op_ppc32_stop(void)
-{
-       on_each_cpu(op_ppc32_cpu_stop, NULL, 0, 1);
-}
-
-static int op_ppc32_create_files(struct super_block *sb, struct dentry *root)
-{
-       int i;
-
-       for (i = 0; i < model->num_counters; ++i) {
-               struct dentry *dir;
-               char buf[3];
-
-               snprintf(buf, sizeof buf, "%d", i);
-               dir = oprofilefs_mkdir(sb, root, buf);
-
-               oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-               oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-               oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-               oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-               oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
-               /* FIXME: Not sure if this is used */
-               oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
-       }
-
-       oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
-       oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
-
-       /* Default to tracing both kernel and user */
-       sys.enable_kernel = 1;
-       sys.enable_user = 1;
-
-       return 0;
-}
-
-static struct oprofile_operations oprof_ppc32_ops = {
-       .create_files   = op_ppc32_create_files,
-       .setup          = op_ppc32_setup,
-       .shutdown       = op_ppc32_shutdown,
-       .start          = op_ppc32_start,
-       .stop           = op_ppc32_stop,
-       .cpu_type       = NULL          /* To be filled in below. */
-};
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-       char *name;
-       int cpu_id = smp_processor_id();
-
-#ifdef CONFIG_FSL_BOOKE
-       model = &op_model_fsl_booke;
-#else
-       return -ENODEV;
-#endif
-
-       name = kmalloc(32, GFP_KERNEL);
-
-       if (NULL == name)
-               return -ENOMEM;
-
-       sprintf(name, "ppc/%s", cur_cpu_spec[cpu_id]->cpu_name);
-
-       oprof_ppc32_ops.cpu_type = name;
-
-       model->num_counters = cur_cpu_spec[cpu_id]->num_pmcs;
-
-       *ops = oprof_ppc32_ops;
-
-       printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-              oprof_ppc32_ops.cpu_type);
-
-       return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-       kfree(oprof_ppc32_ops.cpu_type);
-       oprof_ppc32_ops.cpu_type = NULL;
-}
diff --git a/arch/ppc/oprofile/op_impl.h b/arch/ppc/oprofile/op_impl.h
deleted file mode 100644 (file)
index bc336dc..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs.  */
-struct op_counter_config {
-       unsigned long enabled;
-       unsigned long event;
-       unsigned long count;
-       unsigned long kernel;
-       unsigned long user;
-       unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs.  */
-struct op_system_config {
-       unsigned long enable_kernel;
-       unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_ppc32_model {
-       void (*reg_setup) (struct op_counter_config *,
-                          struct op_system_config *,
-                          int num_counters);
-       void (*start) (struct op_counter_config *);
-       void (*stop) (void);
-       void (*handle_interrupt) (struct pt_regs *,
-                                 struct op_counter_config *);
-       int num_counters;
-};
-
-#endif /* OP_IMPL_H */
index 27b778a..d32ae11 100644 (file)
@@ -90,7 +90,7 @@ ebony_calibrate_decr(void)
         * on Rev. C silicon then errata forces us to
         * use the internal clock.
         */
-       if (strcmp(cur_cpu_spec[0]->cpu_name, "440GP Rev. B") == 0)
+       if (strcmp(cur_cpu_spec->cpu_name, "440GP Rev. B") == 0)
                freq = EBONY_440GP_RB_SYSCLK;
        else
                freq = EBONY_440GP_RC_SYSCLK;
index 1584cd7..58e44c0 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/config.h>
 #include <linux/init.h>
-#include <linux/seq_file.h>
 #include <syslib/ppc83xx_setup.h>
 #include <asm/ppcboot.h>
 
index 3875e83..84acf6e 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/config.h>
 #include <linux/init.h>
-#include <linux/seq_file.h>
 #include <asm/ppcboot.h>
 
 #define BOARD_CCSRBAR          ((uint)0xe0000000)
index 7bcc6c3..95fdf4b 100644 (file)
@@ -21,7 +21,6 @@
 
 #include <linux/config.h>
 #include <linux/init.h>
-#include <linux/seq_file.h>
 #include <asm/ppcboot.h>
 
 #define BOARD_CCSRBAR          ((uint)0xe0000000)
index df6ff98..48a4a51 100644 (file)
@@ -541,7 +541,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
 
        ppc_md.setup_arch = chestnut_setup_arch;
        ppc_md.show_cpuinfo = chestnut_show_cpuinfo;
-       ppc_md.irq_canonicalize = NULL;
        ppc_md.init_IRQ = mv64360_init_irq;
        ppc_md.get_irq = mv64360_get_irq;
        ppc_md.init = NULL;
index 7d3fbb5..bd047aa 100644 (file)
@@ -29,7 +29,7 @@ void __iomem *gg2_pci_config_base;
  * limit the bus number to 3 bits
  */
 
-int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
+int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
                           int len, u32 *val)
 {
        volatile void __iomem *cfg_data;
@@ -56,7 +56,7 @@ int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
        return PCIBIOS_SUCCESSFUL;
 }
 
-int __chrp gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
+int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
                            int len, u32 val)
 {
        volatile void __iomem *cfg_data;
@@ -92,7 +92,7 @@ static struct pci_ops gg2_pci_ops =
 /*
  * Access functions for PCI config space using RTAS calls.
  */
-int __chrp
+int
 rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
                 int len, u32 *val)
 {
@@ -108,7 +108,7 @@ rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
        return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
 }
 
-int __chrp
+int
 rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
                  int len, u32 val)
 {
@@ -203,7 +203,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
                printk ("RTAS supporting Pegasos OF not found, please upgrade"
                        " your firmware\n");
        }
-       pci_assign_all_busses = 1;
+       pci_assign_all_buses = 1;
 }
 
 void __init
index 66346f0..56c53bb 100644 (file)
@@ -104,7 +104,7 @@ static const char *gg2_cachemodes[4] = {
        "Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
 };
 
-int __chrp
+int
 chrp_show_cpuinfo(struct seq_file *m)
 {
        int i, sdramen;
@@ -302,7 +302,7 @@ void __init chrp_setup_arch(void)
        pci_create_OF_bus_map();
 }
 
-void __chrp
+void
 chrp_event_scan(void)
 {
        unsigned char log[1024];
@@ -313,7 +313,7 @@ chrp_event_scan(void)
        ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
 }
 
-void __chrp
+void
 chrp_restart(char *cmd)
 {
        printk("RTAS system-reboot returned %d\n",
@@ -321,7 +321,7 @@ chrp_restart(char *cmd)
        for (;;);
 }
 
-void __chrp
+void
 chrp_power_off(void)
 {
        /* allow power on only with power button press */
@@ -330,20 +330,12 @@ chrp_power_off(void)
        for (;;);
 }
 
-void __chrp
+void
 chrp_halt(void)
 {
        chrp_power_off();
 }
 
-u_int __chrp
-chrp_irq_canonicalize(u_int irq)
-{
-       if (irq == 2)
-               return 9;
-       return irq;
-}
-
 /*
  * Finds the open-pic node and sets OpenPIC_Addr based on its reg property.
  * Then checks if it has an interrupt-ranges property.  If it does then
@@ -499,6 +491,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
        DMA_MODE_READ = 0x44;
        DMA_MODE_WRITE = 0x48;
        isa_io_base = CHRP_ISA_IO_BASE;         /* default value */
+       ppc_do_canonicalize_irqs = 1;
 
        if (root)
                machine = get_property(root, "model", NULL);
@@ -517,7 +510,6 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ppc_md.show_percpuinfo = of_show_percpuinfo;
        ppc_md.show_cpuinfo   = chrp_show_cpuinfo;
 
-       ppc_md.irq_canonicalize = chrp_irq_canonicalize;
        ppc_md.init_IRQ       = chrp_init_IRQ;
        if (_chrp_type == _CHRP_Pegasos)
                ppc_md.get_irq        = i8259_irq;
@@ -561,7 +553,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
 #endif
 
 #ifdef CONFIG_SMP
-       ppc_md.smp_ops = &chrp_smp_ops;
+       smp_ops = &chrp_smp_ops;
 #endif /* CONFIG_SMP */
 
        /*
@@ -571,7 +563,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
        if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
 }
 
-void __chrp
+void
 rtas_display_progress(char *s, unsigned short hex)
 {
        int width;
@@ -598,7 +590,7 @@ rtas_display_progress(char *s, unsigned short hex)
        call_rtas( "display-character", 1, 1, NULL, ' ' );
 }
 
-void __chrp
+void
 rtas_indicator_progress(char *s, unsigned short hex)
 {
        call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex);
index 0ea1f7d..97e5395 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/residual.h>
 #include <asm/time.h>
 #include <asm/open_pic.h>
+#include <asm/machdep.h>
 
 extern unsigned long smp_chrp_cpu_nr;
 
@@ -88,7 +89,7 @@ smp_chrp_take_timebase(void)
 }
 
 /* CHRP with openpic */
-struct smp_ops_t chrp_smp_ops __chrpdata = {
+struct smp_ops_t chrp_smp_ops = {
        .message_pass = smp_openpic_message_pass,
        .probe = smp_chrp_probe,
        .kick_cpu = smp_chrp_kick_cpu,
index 6037ce7..29d074c 100644 (file)
@@ -52,7 +52,7 @@ long __init chrp_time_init(void)
        return 0;
 }
 
-int __chrp chrp_cmos_clock_read(int addr)
+int chrp_cmos_clock_read(int addr)
 {
        if (nvram_as1 != 0)
                outb(addr>>8, nvram_as1);
@@ -60,7 +60,7 @@ int __chrp chrp_cmos_clock_read(int addr)
        return (inb(nvram_data));
 }
 
-void __chrp chrp_cmos_clock_write(unsigned long val, int addr)
+void chrp_cmos_clock_write(unsigned long val, int addr)
 {
        if (nvram_as1 != 0)
                outb(addr>>8, nvram_as1);
@@ -72,7 +72,7 @@ void __chrp chrp_cmos_clock_write(unsigned long val, int addr)
 /*
  * Set the hardware clock. -- Cort
  */
-int __chrp chrp_set_rtc_time(unsigned long nowtime)
+int chrp_set_rtc_time(unsigned long nowtime)
 {
        unsigned char save_control, save_freq_select;
        struct rtc_time tm;
@@ -118,7 +118,7 @@ int __chrp chrp_set_rtc_time(unsigned long nowtime)
        return 0;
 }
 
-unsigned long __chrp chrp_get_rtc_time(void)
+unsigned long chrp_get_rtc_time(void)
 {
        unsigned int year, mon, day, hour, min, sec;
        int uip, i;
index 9811a8a..53388a1 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/bootinfo.h>
 #include <asm/ppcboot.h>
 #include <asm/mv64x60.h>
+#include <asm/machdep.h>
 #include <platforms/ev64360.h>
 
 #define BOARD_VENDOR    "Marvell"
index 3a5ff9f..729897c 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/time.h>
 #include <asm/open_pic.h>
 #include <asm/bootinfo.h>
+#include <asm/machdep.h>
 
 void gemini_find_bridges(void);
 static int gemini_get_clock_speed(void);
@@ -555,7 +556,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
 
        ppc_md.setup_arch = gemini_setup_arch;
        ppc_md.show_cpuinfo = gemini_show_cpuinfo;
-       ppc_md.irq_canonicalize = NULL;
        ppc_md.init_IRQ = gemini_init_IRQ;
        ppc_md.get_irq = openpic_get_irq;
        ppc_md.init = NULL;
@@ -575,6 +575,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup;
 
 #ifdef CONFIG_SMP
-       ppc_md.smp_ops = &gemini_smp_ops;
+       smp_ops = &gemini_smp_ops;
 #endif /* CONFIG_SMP */
 }
index ff37968..2cc12b0 100644 (file)
@@ -753,7 +753,7 @@ static int smp_hdpu_probe(void)
 }
 
 static void
-smp_hdpu_message_pass(int target, int msg, unsigned long data, int wait)
+smp_hdpu_message_pass(int target, int msg)
 {
        if (msg > 0x3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -949,7 +949,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
 #endif                         /* CONFIG_SERIAL_TEXT_DEBUG */
 
 #ifdef CONFIG_SMP
-       ppc_md.smp_ops = &hdpu_smp_ops;
+       smp_ops = &hdpu_smp_ops;
 #endif                         /* CONFIG_SMP */
 
 #if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
index 2b53afa..a301c5a 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/ppcboot.h>
 #include <asm/mv64x60.h>
 #include <platforms/katana.h>
+#include <asm/machdep.h>
 
 static struct mv64x60_handle   bh;
 static katana_id_t             katana_id;
@@ -520,7 +521,7 @@ katana_fixup_resources(struct pci_dev *dev)
 {
        u16     v16;
 
-       pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_LINE_SIZE>>2);
+       pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES>>2);
 
        pci_read_config_word(dev, PCI_COMMAND, &v16);
        v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK;
index b604cf8..d44cc99 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/io.h>
 #include <asm/mpc52xx.h>
 #include <asm/ppc_sys.h>
+#include <asm/machdep.h>
 
 #include <syslib/mpc52xx_pci.h>
 
index a556952..800c56a 100644 (file)
@@ -144,15 +144,6 @@ lopec_show_cpuinfo(struct seq_file *m)
        return 0;
 }
 
-static u32
-lopec_irq_canonicalize(u32 irq)
-{
-       if (irq == 2)
-               return 9;
-       else
-               return irq;
-}
-
 static void
 lopec_restart(char *cmd)
 {
@@ -379,10 +370,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ISA_DMA_THRESHOLD = 0x00ffffff;
        DMA_MODE_READ = 0x44;
        DMA_MODE_WRITE = 0x48;
+       ppc_do_canonicalize_irqs = 1;
 
        ppc_md.setup_arch = lopec_setup_arch;
        ppc_md.show_cpuinfo = lopec_show_cpuinfo;
-       ppc_md.irq_canonicalize = lopec_irq_canonicalize;
        ppc_md.init_IRQ = lopec_init_IRQ;
        ppc_md.get_irq = openpic_get_irq;
 
index 12446b9..f93a3f8 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/io.h>
 #include <asm/todc.h>
 #include <asm/bootinfo.h>
+#include <asm/machdep.h>
 
 #include <syslib/cpc700.h>
 
index ed2b1ce..8be2f7d 100644 (file)
@@ -37,7 +37,7 @@ static int backlight_req_enable = -1;
 static void backlight_callback(void *);
 static DECLARE_WORK(backlight_work, backlight_callback, NULL);
 
-void __pmac register_backlight_controller(struct backlight_controller *ctrler,
+void register_backlight_controller(struct backlight_controller *ctrler,
                                          void *data, char *type)
 {
        struct device_node* bk_node;
@@ -99,7 +99,7 @@ void __pmac register_backlight_controller(struct backlight_controller *ctrler,
 }
 EXPORT_SYMBOL(register_backlight_controller);
 
-void __pmac unregister_backlight_controller(struct backlight_controller
+void unregister_backlight_controller(struct backlight_controller
                                            *ctrler, void *data)
 {
        /* We keep the current backlight level (for now) */
@@ -108,7 +108,7 @@ void __pmac unregister_backlight_controller(struct backlight_controller
 }
 EXPORT_SYMBOL(unregister_backlight_controller);
 
-static int __pmac __set_backlight_enable(int enable)
+static int __set_backlight_enable(int enable)
 {
        int rc;
 
@@ -122,7 +122,7 @@ static int __pmac __set_backlight_enable(int enable)
        release_console_sem();
        return rc;
 }
-int __pmac set_backlight_enable(int enable)
+int set_backlight_enable(int enable)
 {
        if (!backlighter)
                return -ENODEV;
@@ -133,7 +133,7 @@ int __pmac set_backlight_enable(int enable)
 
 EXPORT_SYMBOL(set_backlight_enable);
 
-int __pmac get_backlight_enable(void)
+int get_backlight_enable(void)
 {
        if (!backlighter)
                return -ENODEV;
@@ -141,7 +141,7 @@ int __pmac get_backlight_enable(void)
 }
 EXPORT_SYMBOL(get_backlight_enable);
 
-static int __pmac __set_backlight_level(int level)
+static int __set_backlight_level(int level)
 {
        int rc = 0;
 
@@ -165,7 +165,7 @@ static int __pmac __set_backlight_level(int level)
        }
        return rc;
 }
-int __pmac set_backlight_level(int level)
+int set_backlight_level(int level)
 {
        if (!backlighter)
                return -ENODEV;
@@ -176,7 +176,7 @@ int __pmac set_backlight_level(int level)
 
 EXPORT_SYMBOL(set_backlight_level);
 
-int __pmac get_backlight_level(void)
+int get_backlight_level(void)
 {
        if (!backlighter)
                return -ENODEV;
index d4bc5f6..fba7e4d 100644 (file)
@@ -136,7 +136,7 @@ static inline void debug_calc_bogomips(void)
 
 /* Switch CPU speed under 750FX CPU control
  */
-static int __pmac cpu_750fx_cpu_speed(int low_speed)
+static int cpu_750fx_cpu_speed(int low_speed)
 {
        u32 hid2;
 
@@ -172,7 +172,7 @@ static int __pmac cpu_750fx_cpu_speed(int low_speed)
        return 0;
 }
 
-static unsigned int __pmac cpu_750fx_get_cpu_speed(void)
+static unsigned int cpu_750fx_get_cpu_speed(void)
 {
        if (mfspr(SPRN_HID1) & HID1_PS)
                return low_freq;
@@ -181,7 +181,7 @@ static unsigned int __pmac cpu_750fx_get_cpu_speed(void)
 }
 
 /* Switch CPU speed using DFS */
-static int __pmac dfs_set_cpu_speed(int low_speed)
+static int dfs_set_cpu_speed(int low_speed)
 {
        if (low_speed == 0) {
                /* ramping up, set voltage first */
@@ -205,7 +205,7 @@ static int __pmac dfs_set_cpu_speed(int low_speed)
        return 0;
 }
 
-static unsigned int __pmac dfs_get_cpu_speed(void)
+static unsigned int dfs_get_cpu_speed(void)
 {
        if (mfspr(SPRN_HID1) & HID1_DFS)
                return low_freq;
@@ -216,7 +216,7 @@ static unsigned int __pmac dfs_get_cpu_speed(void)
 
 /* Switch CPU speed using slewing GPIOs
  */
-static int __pmac gpios_set_cpu_speed(int low_speed)
+static int gpios_set_cpu_speed(int low_speed)
 {
        int gpio, timeout = 0;
 
@@ -258,7 +258,7 @@ static int __pmac gpios_set_cpu_speed(int low_speed)
 
 /* Switch CPU speed under PMU control
  */
-static int __pmac pmu_set_cpu_speed(int low_speed)
+static int pmu_set_cpu_speed(int low_speed)
 {
        struct adb_request req;
        unsigned long save_l2cr;
@@ -354,7 +354,7 @@ static int __pmac pmu_set_cpu_speed(int low_speed)
        return 0;
 }
 
-static int __pmac do_set_cpu_speed(int speed_mode, int notify)
+static int do_set_cpu_speed(int speed_mode, int notify)
 {
        struct cpufreq_freqs freqs;
        unsigned long l3cr;
@@ -391,17 +391,17 @@ static int __pmac do_set_cpu_speed(int speed_mode, int notify)
        return 0;
 }
 
-static unsigned int __pmac pmac_cpufreq_get_speed(unsigned int cpu)
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
 {
        return cur_freq;
 }
 
-static int __pmac pmac_cpufreq_verify(struct cpufreq_policy *policy)
+static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
 {
        return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
 }
 
-static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy,
+static int pmac_cpufreq_target(        struct cpufreq_policy *policy,
                                        unsigned int target_freq,
                                        unsigned int relation)
 {
@@ -414,13 +414,13 @@ static int __pmac pmac_cpufreq_target(    struct cpufreq_policy *policy,
        return do_set_cpu_speed(newstate, 1);
 }
 
-unsigned int __pmac pmac_get_one_cpufreq(int i)
+unsigned int pmac_get_one_cpufreq(int i)
 {
        /* Supports only one CPU for now */
        return (i == 0) ? cur_freq : 0;
 }
 
-static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        if (policy->cpu != 0)
                return -ENODEV;
@@ -433,7 +433,7 @@ static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
        return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
 }
 
-static u32 __pmac read_gpio(struct device_node *np)
+static u32 read_gpio(struct device_node *np)
 {
        u32 *reg = (u32 *)get_property(np, "reg", NULL);
        u32 offset;
@@ -452,7 +452,7 @@ static u32 __pmac read_gpio(struct device_node *np)
        return offset;
 }
 
-static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
 {
        /* Ok, this could be made a bit smarter, but let's be robust for now. We
         * always force a speed change to high speed before sleep, to make sure
@@ -468,7 +468,7 @@ static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message
        return 0;
 }
 
-static int __pmac pmac_cpufreq_resume(struct cpufreq_policy *policy)
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
 {
        /* If we resume, first check if we have a get() function */
        if (get_speed_proc)
@@ -501,7 +501,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
 };
 
 
-static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
 {
        struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
                                                                "voltage-gpio");
@@ -593,7 +593,7 @@ static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
        return 0;
 }
 
-static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
 {
        struct device_node *volt_gpio_np;
 
@@ -620,7 +620,7 @@ static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
        return 0;
 }
 
-static int __pmac pmac_cpufreq_init_750FX(struct device_node *cpunode)
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
 {
        struct device_node *volt_gpio_np;
        u32 pvr, *value;
index dd6d45a..58884a6 100644 (file)
@@ -63,7 +63,7 @@ extern struct device_node *k2_skiplist[2];
  * We use a single global lock to protect accesses. Each driver has
  * to take care of its own locking
  */
-static DEFINE_SPINLOCK(feature_lock  __pmacdata);
+static DEFINE_SPINLOCK(feature_lock);
 
 #define LOCK(flags)    spin_lock_irqsave(&feature_lock, flags);
 #define UNLOCK(flags)  spin_unlock_irqrestore(&feature_lock, flags);
@@ -72,9 +72,9 @@ static DEFINE_SPINLOCK(feature_lock  __pmacdata);
 /*
  * Instance of some macio stuffs
  */
-struct macio_chip macio_chips[MAX_MACIO_CHIPS]  __pmacdata;
+struct macio_chip macio_chips[MAX_MACIO_CHIPS];
 
-struct macio_chip* __pmac macio_find(struct device_node* child, int type)
+struct macio_chip* macio_find(struct device_node* child, int type)
 {
        while(child) {
                int     i;
@@ -89,7 +89,7 @@ struct macio_chip* __pmac macio_find(struct device_node* child, int type)
 }
 EXPORT_SYMBOL_GPL(macio_find);
 
-static const char* macio_names[] __pmacdata =
+static const char* macio_names[] =
 {
        "Unknown",
        "Grand Central",
@@ -116,10 +116,10 @@ static const char* macio_names[] __pmacdata =
 #define UN_BIS(r,v)    (UN_OUT((r), UN_IN(r) | (v)))
 #define UN_BIC(r,v)    (UN_OUT((r), UN_IN(r) & ~(v)))
 
-static struct device_node* uninorth_node __pmacdata;
-static u32 __iomem * uninorth_base __pmacdata;
-static u32 uninorth_rev __pmacdata;
-static int uninorth_u3 __pmacdata;
+static struct device_node* uninorth_node;
+static u32 __iomem * uninorth_base;
+static u32 uninorth_rev;
+static int uninorth_u3;
 static void __iomem *u3_ht;
 
 /*
@@ -142,13 +142,13 @@ struct pmac_mb_def
        struct feature_table_entry*     features;
        unsigned long                   board_flags;
 };
-static struct pmac_mb_def pmac_mb __pmacdata;
+static struct pmac_mb_def pmac_mb;
 
 /*
  * Here are the chip specific feature functions
  */
 
-static inline int __pmac
+static inline int
 simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value)
 {
        struct macio_chip*      macio;
@@ -170,7 +170,7 @@ simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int
 
 #ifndef CONFIG_POWER4
 
-static long __pmac
+static long
 ohare_htw_scc_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -263,21 +263,21 @@ ohare_htw_scc_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 ohare_floppy_enable(struct device_node* node, long param, long value)
 {
        return simple_feature_tweak(node, macio_ohare,
                OHARE_FCR, OH_FLOPPY_ENABLE, value);
 }
 
-static long __pmac
+static long
 ohare_mesh_enable(struct device_node* node, long param, long value)
 {
        return simple_feature_tweak(node, macio_ohare,
                OHARE_FCR, OH_MESH_ENABLE, value);
 }
 
-static long __pmac
+static long
 ohare_ide_enable(struct device_node* node, long param, long value)
 {
        switch(param) {
@@ -298,7 +298,7 @@ ohare_ide_enable(struct device_node* node, long param, long value)
        }
 }
 
-static long __pmac
+static long
 ohare_ide_reset(struct device_node* node, long param, long value)
 {
        switch(param) {
@@ -313,7 +313,7 @@ ohare_ide_reset(struct device_node* node, long param, long value)
        }
 }
 
-static long __pmac
+static long
 ohare_sleep_state(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio = &macio_chips[0];
@@ -329,7 +329,7 @@ ohare_sleep_state(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 heathrow_modem_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -373,7 +373,7 @@ heathrow_modem_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 heathrow_floppy_enable(struct device_node* node, long param, long value)
 {
        return simple_feature_tweak(node, macio_unknown,
@@ -382,7 +382,7 @@ heathrow_floppy_enable(struct device_node* node, long param, long value)
                value);
 }
 
-static long __pmac
+static long
 heathrow_mesh_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -411,7 +411,7 @@ heathrow_mesh_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 heathrow_ide_enable(struct device_node* node, long param, long value)
 {
        switch(param) {
@@ -426,7 +426,7 @@ heathrow_ide_enable(struct device_node* node, long param, long value)
        }
 }
 
-static long __pmac
+static long
 heathrow_ide_reset(struct device_node* node, long param, long value)
 {
        switch(param) {
@@ -441,7 +441,7 @@ heathrow_ide_reset(struct device_node* node, long param, long value)
        }
 }
 
-static long __pmac
+static long
 heathrow_bmac_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -470,7 +470,7 @@ heathrow_bmac_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 heathrow_sound_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -501,16 +501,16 @@ heathrow_sound_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static u32 save_fcr[6] __pmacdata;
-static u32 save_mbcr __pmacdata;
-static u32 save_gpio_levels[2] __pmacdata;
-static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT] __pmacdata;
-static u8 save_gpio_normal[KEYLARGO_GPIO_CNT] __pmacdata;
-static u32 save_unin_clock_ctl __pmacdata;
-static struct dbdma_regs save_dbdma[13] __pmacdata;
-static struct dbdma_regs save_alt_dbdma[13] __pmacdata;
+static u32 save_fcr[6];
+static u32 save_mbcr;
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
+static struct dbdma_regs save_dbdma[13];
+static struct dbdma_regs save_alt_dbdma[13];
 
-static void __pmac
+static void
 dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
 {
        int i;
@@ -527,7 +527,7 @@ dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
        }
 }
 
-static void __pmac
+static void
 dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
 {
        int i;
@@ -547,7 +547,7 @@ dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
        }
 }
 
-static void __pmac
+static void
 heathrow_sleep(struct macio_chip* macio, int secondary)
 {
        if (secondary) {
@@ -580,7 +580,7 @@ heathrow_sleep(struct macio_chip* macio, int secondary)
        (void)MACIO_IN32(HEATHROW_FCR);
 }
 
-static void __pmac
+static void
 heathrow_wakeup(struct macio_chip* macio, int secondary)
 {
        if (secondary) {
@@ -605,7 +605,7 @@ heathrow_wakeup(struct macio_chip* macio, int secondary)
        }
 }
 
-static long __pmac
+static long
 heathrow_sleep_state(struct device_node* node, long param, long value)
 {
        if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
@@ -622,7 +622,7 @@ heathrow_sleep_state(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_scc_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -723,7 +723,7 @@ core99_scc_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_modem_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -775,7 +775,7 @@ core99_modem_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 pangea_modem_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -830,7 +830,7 @@ pangea_modem_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_ata100_enable(struct device_node* node, long value)
 {
        unsigned long flags;
@@ -860,7 +860,7 @@ core99_ata100_enable(struct device_node* node, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_ide_enable(struct device_node* node, long param, long value)
 {
        /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
@@ -883,7 +883,7 @@ core99_ide_enable(struct device_node* node, long param, long value)
        }
 }
 
-static long __pmac
+static long
 core99_ide_reset(struct device_node* node, long param, long value)
 {
        switch(param) {
@@ -901,7 +901,7 @@ core99_ide_reset(struct device_node* node, long param, long value)
        }
 }
 
-static long __pmac
+static long
 core99_gmac_enable(struct device_node* node, long param, long value)
 {
        unsigned long flags;
@@ -918,7 +918,7 @@ core99_gmac_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_gmac_phy_reset(struct device_node* node, long param, long value)
 {
        unsigned long flags;
@@ -943,7 +943,7 @@ core99_gmac_phy_reset(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_sound_chip_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -973,7 +973,7 @@ core99_sound_chip_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_airport_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip*      macio;
@@ -1060,7 +1060,7 @@ core99_airport_enable(struct device_node* node, long param, long value)
 }
 
 #ifdef CONFIG_SMP
-static long __pmac
+static long
 core99_reset_cpu(struct device_node* node, long param, long value)
 {
        unsigned int reset_io = 0;
@@ -1104,7 +1104,7 @@ core99_reset_cpu(struct device_node* node, long param, long value)
 }
 #endif /* CONFIG_SMP */
 
-static long __pmac
+static long
 core99_usb_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip* macio;
@@ -1257,7 +1257,7 @@ core99_usb_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_firewire_enable(struct device_node* node, long param, long value)
 {
        unsigned long flags;
@@ -1284,7 +1284,7 @@ core99_firewire_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 core99_firewire_cable_power(struct device_node* node, long param, long value)
 {
        unsigned long flags;
@@ -1315,7 +1315,7 @@ core99_firewire_cable_power(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 intrepid_aack_delay_enable(struct device_node* node, long param, long value)
 {
        unsigned long flags;
@@ -1336,7 +1336,7 @@ intrepid_aack_delay_enable(struct device_node* node, long param, long value)
 
 #endif /* CONFIG_POWER4 */
 
-static long __pmac
+static long
 core99_read_gpio(struct device_node* node, long param, long value)
 {
        struct macio_chip* macio = &macio_chips[0];
@@ -1345,7 +1345,7 @@ core99_read_gpio(struct device_node* node, long param, long value)
 }
 
 
-static long __pmac
+static long
 core99_write_gpio(struct device_node* node, long param, long value)
 {
        struct macio_chip* macio = &macio_chips[0];
@@ -1356,7 +1356,7 @@ core99_write_gpio(struct device_node* node, long param, long value)
 
 #ifdef CONFIG_POWER4
 
-static long __pmac
+static long
 g5_gmac_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip* macio = &macio_chips[0];
@@ -1380,7 +1380,7 @@ g5_gmac_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 g5_fw_enable(struct device_node* node, long param, long value)
 {
        struct macio_chip* macio = &macio_chips[0];
@@ -1403,7 +1403,7 @@ g5_fw_enable(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 g5_mpic_enable(struct device_node* node, long param, long value)
 {
        unsigned long flags;
@@ -1419,7 +1419,7 @@ g5_mpic_enable(struct device_node* node, long param, long value)
 }
 
 #ifdef CONFIG_SMP
-static long __pmac
+static long
 g5_reset_cpu(struct device_node* node, long param, long value)
 {
        unsigned int reset_io = 0;
@@ -1465,7 +1465,7 @@ g5_reset_cpu(struct device_node* node, long param, long value)
  * This takes the second CPU off the bus on dual CPU machines
  * running UP
  */
-void __pmac g5_phy_disable_cpu1(void)
+void g5_phy_disable_cpu1(void)
 {
        UN_OUT(U3_API_PHY_CONFIG_1, 0);
 }
@@ -1474,7 +1474,7 @@ void __pmac g5_phy_disable_cpu1(void)
 
 #ifndef CONFIG_POWER4
 
-static void __pmac
+static void
 keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
 {
        u32 temp;
@@ -1528,7 +1528,7 @@ keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
        (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
 }
 
-static void __pmac
+static void
 pangea_shutdown(struct macio_chip* macio, int sleep_mode)
 {
        u32 temp;
@@ -1562,7 +1562,7 @@ pangea_shutdown(struct macio_chip* macio, int sleep_mode)
        (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
 }
 
-static void __pmac
+static void
 intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
 {
        u32 temp;
@@ -1591,7 +1591,7 @@ intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
 }
 
 
-void __pmac pmac_tweak_clock_spreading(int enable)
+void pmac_tweak_clock_spreading(int enable)
 {
        struct macio_chip* macio = &macio_chips[0];
 
@@ -1698,7 +1698,7 @@ void __pmac pmac_tweak_clock_spreading(int enable)
 }
 
 
-static int __pmac
+static int
 core99_sleep(void)
 {
        struct macio_chip* macio;
@@ -1791,7 +1791,7 @@ core99_sleep(void)
        return 0;
 }
 
-static int __pmac
+static int
 core99_wake_up(void)
 {
        struct macio_chip* macio;
@@ -1854,7 +1854,7 @@ core99_wake_up(void)
        return 0;
 }
 
-static long __pmac
+static long
 core99_sleep_state(struct device_node* node, long param, long value)
 {
        /* Param == 1 means to enter the "fake sleep" mode that is
@@ -1884,7 +1884,7 @@ core99_sleep_state(struct device_node* node, long param, long value)
 
 #endif /* CONFIG_POWER4 */
 
-static long __pmac
+static long
 generic_dev_can_wake(struct device_node* node, long param, long value)
 {
        /* Todo: eventually check we are really dealing with on-board
@@ -1896,7 +1896,7 @@ generic_dev_can_wake(struct device_node* node, long param, long value)
        return 0;
 }
 
-static long __pmac
+static long
 generic_get_mb_info(struct device_node* node, long param, long value)
 {
        switch(param) {
@@ -1919,7 +1919,7 @@ generic_get_mb_info(struct device_node* node, long param, long value)
 
 /* Used on any machine
  */
-static struct feature_table_entry any_features[]  __pmacdata = {
+static struct feature_table_entry any_features[] = {
        { PMAC_FTR_GET_MB_INFO,         generic_get_mb_info },
        { PMAC_FTR_DEVICE_CAN_WAKE,     generic_dev_can_wake },
        { 0, NULL }
@@ -1931,7 +1931,7 @@ static struct feature_table_entry any_features[]  __pmacdata = {
  * 2400,3400 and 3500 series powerbooks. Some older desktops seem
  * to have issues with turning on/off those asic cells
  */
-static struct feature_table_entry ohare_features[]  __pmacdata = {
+static struct feature_table_entry ohare_features[] = {
        { PMAC_FTR_SCC_ENABLE,          ohare_htw_scc_enable },
        { PMAC_FTR_SWIM3_ENABLE,        ohare_floppy_enable },
        { PMAC_FTR_MESH_ENABLE,         ohare_mesh_enable },
@@ -1945,7 +1945,7 @@ static struct feature_table_entry ohare_features[]  __pmacdata = {
  * Separated as some features couldn't be properly tested
  * and the serial port control bits appear to confuse it.
  */
-static struct feature_table_entry heathrow_desktop_features[]  __pmacdata = {
+static struct feature_table_entry heathrow_desktop_features[] = {
        { PMAC_FTR_SWIM3_ENABLE,        heathrow_floppy_enable },
        { PMAC_FTR_MESH_ENABLE,         heathrow_mesh_enable },
        { PMAC_FTR_IDE_ENABLE,          heathrow_ide_enable },
@@ -1957,7 +1957,7 @@ static struct feature_table_entry heathrow_desktop_features[]  __pmacdata = {
 /* Heathrow based laptop, that is the Wallstreet and mainstreet
  * powerbooks.
  */
-static struct feature_table_entry heathrow_laptop_features[]  __pmacdata = {
+static struct feature_table_entry heathrow_laptop_features[] = {
        { PMAC_FTR_SCC_ENABLE,          ohare_htw_scc_enable },
        { PMAC_FTR_MODEM_ENABLE,        heathrow_modem_enable },
        { PMAC_FTR_SWIM3_ENABLE,        heathrow_floppy_enable },
@@ -1973,7 +1973,7 @@ static struct feature_table_entry heathrow_laptop_features[]  __pmacdata = {
 /* Paddington based machines
  * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
  */
-static struct feature_table_entry paddington_features[]  __pmacdata = {
+static struct feature_table_entry paddington_features[] = {
        { PMAC_FTR_SCC_ENABLE,          ohare_htw_scc_enable },
        { PMAC_FTR_MODEM_ENABLE,        heathrow_modem_enable },
        { PMAC_FTR_SWIM3_ENABLE,        heathrow_floppy_enable },
@@ -1991,7 +1991,7 @@ static struct feature_table_entry paddington_features[]  __pmacdata = {
  * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
  * used on iBook2 & iMac "flow power".
  */
-static struct feature_table_entry core99_features[]  __pmacdata = {
+static struct feature_table_entry core99_features[] = {
        { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
        { PMAC_FTR_MODEM_ENABLE,        core99_modem_enable },
        { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
@@ -2014,7 +2014,7 @@ static struct feature_table_entry core99_features[]  __pmacdata = {
 
 /* RackMac
  */
-static struct feature_table_entry rackmac_features[]  __pmacdata = {
+static struct feature_table_entry rackmac_features[] = {
        { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
        { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
        { PMAC_FTR_IDE_RESET,           core99_ide_reset },
@@ -2034,7 +2034,7 @@ static struct feature_table_entry rackmac_features[]  __pmacdata = {
 
 /* Pangea features
  */
-static struct feature_table_entry pangea_features[]  __pmacdata = {
+static struct feature_table_entry pangea_features[] = {
        { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
        { PMAC_FTR_MODEM_ENABLE,        pangea_modem_enable },
        { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
@@ -2054,7 +2054,7 @@ static struct feature_table_entry pangea_features[]  __pmacdata = {
 
 /* Intrepid features
  */
-static struct feature_table_entry intrepid_features[]  __pmacdata = {
+static struct feature_table_entry intrepid_features[] = {
        { PMAC_FTR_SCC_ENABLE,          core99_scc_enable },
        { PMAC_FTR_MODEM_ENABLE,        pangea_modem_enable },
        { PMAC_FTR_IDE_ENABLE,          core99_ide_enable },
@@ -2077,7 +2077,7 @@ static struct feature_table_entry intrepid_features[]  __pmacdata = {
 
 /* G5 features
  */
-static struct feature_table_entry g5_features[]  __pmacdata = {
+static struct feature_table_entry g5_features[] = {
        { PMAC_FTR_GMAC_ENABLE,         g5_gmac_enable },
        { PMAC_FTR_1394_ENABLE,         g5_fw_enable },
        { PMAC_FTR_ENABLE_MPIC,         g5_mpic_enable },
@@ -2091,7 +2091,7 @@ static struct feature_table_entry g5_features[]  __pmacdata = {
 
 #endif /* CONFIG_POWER4 */
 
-static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
+static struct pmac_mb_def pmac_mb_defs[] = {
 #ifndef CONFIG_POWER4
        /*
         * Desktops
@@ -2356,7 +2356,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
 /*
  * The toplevel feature_call callback
  */
-long __pmac
+long
 pmac_do_feature_call(unsigned int selector, ...)
 {
        struct device_node* node;
@@ -2939,8 +2939,8 @@ void __init pmac_check_ht_link(void)
  * Early video resume hook
  */
 
-static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
-static void *pmac_early_vresume_data __pmacdata;
+static void (*pmac_early_vresume_proc)(void *data);
+static void *pmac_early_vresume_data;
 
 void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
 {
@@ -2953,7 +2953,7 @@ void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
 }
 EXPORT_SYMBOL(pmac_set_early_video_resume);
 
-void __pmac pmac_call_early_video_resume(void)
+void pmac_call_early_video_resume(void)
 {
        if (pmac_early_vresume_proc)
                pmac_early_vresume_proc(pmac_early_vresume_data);
@@ -2963,11 +2963,11 @@ void __pmac pmac_call_early_video_resume(void)
  * AGP related suspend/resume code
  */
 
-static struct pci_dev *pmac_agp_bridge __pmacdata;
-static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
-static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
+static struct pci_dev *pmac_agp_bridge;
+static int (*pmac_agp_suspend)(struct pci_dev *bridge);
+static int (*pmac_agp_resume)(struct pci_dev *bridge);
 
-void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
+void pmac_register_agp_pm(struct pci_dev *bridge,
                                 int (*suspend)(struct pci_dev *bridge),
                                 int (*resume)(struct pci_dev *bridge))
 {
@@ -2984,7 +2984,7 @@ void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
 }
 EXPORT_SYMBOL(pmac_register_agp_pm);
 
-void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
+void pmac_suspend_agp_for_card(struct pci_dev *dev)
 {
        if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
                return;
@@ -2994,7 +2994,7 @@ void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pmac_suspend_agp_for_card);
 
-void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
+void pmac_resume_agp_for_card(struct pci_dev *dev)
 {
        if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
                return;
index c9de642..8c9b008 100644 (file)
@@ -88,17 +88,17 @@ extern int system_running;
 static int (*core99_write_bank)(int bank, u8* datas);
 static int (*core99_erase_bank)(int bank);
 
-static char *nvram_image __pmacdata;
+static char *nvram_image;
 
 
-static unsigned char __pmac core99_nvram_read_byte(int addr)
+static unsigned char core99_nvram_read_byte(int addr)
 {
        if (nvram_image == NULL)
                return 0xff;
        return nvram_image[addr];
 }
 
-static void __pmac core99_nvram_write_byte(int addr, unsigned char val)
+static void core99_nvram_write_byte(int addr, unsigned char val)
 {
        if (nvram_image == NULL)
                return;
@@ -106,18 +106,18 @@ static void __pmac core99_nvram_write_byte(int addr, unsigned char val)
 }
 
 
-static unsigned char __openfirmware direct_nvram_read_byte(int addr)
+static unsigned char direct_nvram_read_byte(int addr)
 {
        return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
 }
 
-static void __openfirmware direct_nvram_write_byte(int addr, unsigned char val)
+static void direct_nvram_write_byte(int addr, unsigned char val)
 {
        out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
 }
 
 
-static unsigned char __pmac indirect_nvram_read_byte(int addr)
+static unsigned char indirect_nvram_read_byte(int addr)
 {
        unsigned char val;
        unsigned long flags;
@@ -130,7 +130,7 @@ static unsigned char __pmac indirect_nvram_read_byte(int addr)
        return val;
 }
 
-static void __pmac indirect_nvram_write_byte(int addr, unsigned char val)
+static void indirect_nvram_write_byte(int addr, unsigned char val)
 {
        unsigned long flags;
 
@@ -143,13 +143,13 @@ static void __pmac indirect_nvram_write_byte(int addr, unsigned char val)
 
 #ifdef CONFIG_ADB_PMU
 
-static void __pmac pmu_nvram_complete(struct adb_request *req)
+static void pmu_nvram_complete(struct adb_request *req)
 {
        if (req->arg)
                complete((struct completion *)req->arg);
 }
 
-static unsigned char __pmac pmu_nvram_read_byte(int addr)
+static unsigned char pmu_nvram_read_byte(int addr)
 {
        struct adb_request req;
        DECLARE_COMPLETION(req_complete); 
@@ -165,7 +165,7 @@ static unsigned char __pmac pmu_nvram_read_byte(int addr)
        return req.reply[0];
 }
 
-static void __pmac pmu_nvram_write_byte(int addr, unsigned char val)
+static void pmu_nvram_write_byte(int addr, unsigned char val)
 {
        struct adb_request req;
        DECLARE_COMPLETION(req_complete); 
@@ -183,7 +183,7 @@ static void __pmac pmu_nvram_write_byte(int addr, unsigned char val)
 #endif /* CONFIG_ADB_PMU */
 
 
-static u8 __pmac chrp_checksum(struct chrp_header* hdr)
+static u8 chrp_checksum(struct chrp_header* hdr)
 {
        u8 *ptr;
        u16 sum = hdr->signature;
@@ -194,7 +194,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
        return sum;
 }
 
-static u32 __pmac core99_calc_adler(u8 *buffer)
+static u32 core99_calc_adler(u8 *buffer)
 {
        int cnt;
        u32 low, high;
@@ -216,7 +216,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
        return (high << 16) | low;
 }
 
-static u32 __pmac core99_check(u8* datas)
+static u32 core99_check(u8* datas)
 {
        struct core99_header* hdr99 = (struct core99_header*)datas;
 
@@ -235,7 +235,7 @@ static u32 __pmac core99_check(u8* datas)
        return hdr99->generation;
 }
 
-static int __pmac sm_erase_bank(int bank)
+static int sm_erase_bank(int bank)
 {
        int stat, i;
        unsigned long timeout;
@@ -267,7 +267,7 @@ static int __pmac sm_erase_bank(int bank)
        return 0;
 }
 
-static int __pmac sm_write_bank(int bank, u8* datas)
+static int sm_write_bank(int bank, u8* datas)
 {
        int i, stat = 0;
        unsigned long timeout;
@@ -302,7 +302,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
        return 0;
 }
 
-static int __pmac amd_erase_bank(int bank)
+static int amd_erase_bank(int bank)
 {
        int i, stat = 0;
        unsigned long timeout;
@@ -349,7 +349,7 @@ static int __pmac amd_erase_bank(int bank)
        return 0;
 }
 
-static int __pmac amd_write_bank(int bank, u8* datas)
+static int amd_write_bank(int bank, u8* datas)
 {
        int i, stat = 0;
        unsigned long timeout;
@@ -430,7 +430,7 @@ static void __init lookup_partitions(void)
        DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
 }
 
-static void __pmac core99_nvram_sync(void)
+static void core99_nvram_sync(void)
 {
        struct core99_header* hdr99;
        unsigned long flags;
@@ -554,12 +554,12 @@ void __init pmac_nvram_init(void)
        lookup_partitions();
 }
 
-int __pmac pmac_get_partition(int partition)
+int pmac_get_partition(int partition)
 {
        return nvram_partitions[partition];
 }
 
-u8 __pmac pmac_xpram_read(int xpaddr)
+u8 pmac_xpram_read(int xpaddr)
 {
        int offset = nvram_partitions[pmac_nvram_XPRAM];
 
@@ -569,7 +569,7 @@ u8 __pmac pmac_xpram_read(int xpaddr)
        return ppc_md.nvram_read_val(xpaddr + offset);
 }
 
-void __pmac pmac_xpram_write(int xpaddr, u8 data)
+void pmac_xpram_write(int xpaddr, u8 data)
 {
        int offset = nvram_partitions[pmac_nvram_XPRAM];
 
index 719fb49..786295b 100644 (file)
@@ -141,7 +141,7 @@ fixup_bus_range(struct device_node *bridge)
        |(((unsigned long)(off)) & 0xFCUL) \
        |1UL)
 
-static void volatile __iomem * __pmac
+static void volatile __iomem *
 macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
 {
        unsigned int caddr;
@@ -162,7 +162,7 @@ macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
        return hose->cfg_data + offset;
 }
 
-static int __pmac
+static int
 macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
                    int len, u32 *val)
 {
@@ -190,7 +190,7 @@ macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
        return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac
+static int
 macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
                     int len, u32 val)
 {
@@ -230,7 +230,7 @@ static struct pci_ops macrisc_pci_ops =
 /*
  * Verifiy that a specific (bus, dev_fn) exists on chaos
  */
-static int __pmac
+static int
 chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
 {
        struct device_node *np;
@@ -252,7 +252,7 @@ chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
        return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac
+static int
 chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
                  int len, u32 *val)
 {
@@ -264,7 +264,7 @@ chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
        return macrisc_read_config(bus, devfn, offset, len, val);
 }
 
-static int __pmac
+static int
 chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
                   int len, u32 val)
 {
@@ -294,7 +294,7 @@ static struct pci_ops chaos_pci_ops =
                + (((unsigned long)bus) << 16) \
                + 0x01000000UL)
 
-static void volatile __iomem * __pmac
+static void volatile __iomem *
 u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
 {
        if (bus == hose->first_busno) {
@@ -307,7 +307,7 @@ u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
                return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
 }
 
-static int __pmac
+static int
 u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
                    int len, u32 *val)
 {
@@ -357,7 +357,7 @@ u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
        return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac
+static int
 u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
                     int len, u32 val)
 {
@@ -575,7 +575,7 @@ pmac_find_bridges(void)
         * some offset between bus number and domains for now when we
         * assign all busses should help for now
         */
-       if (pci_assign_all_busses)
+       if (pci_assign_all_buses)
                pcibios_assign_bus_offset = 0x10;
 
 #ifdef CONFIG_POWER4 
@@ -643,7 +643,7 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
 static int __init
 setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
 {
-       pci_assign_all_busses = 1;
+       pci_assign_all_buses = 1;
        has_uninorth = 1;
        hose->ops = &macrisc_pci_ops;
        hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
@@ -677,7 +677,7 @@ setup_u3_agp(struct pci_controller* hose, struct reg_property* addr)
 {
        /* On G5, we move AGP up to high bus number so we don't need
         * to reassign bus numbers for HT. If we ever have P2P bridges
-        * on AGP, we'll have to move pci_assign_all_busses to the
+        * on AGP, we'll have to move pci_assign_all_buses to the
         * pci_controller structure so we enable it for AGP and not for
         * HT childs.
         * We hard code the address because of the different size of
@@ -899,7 +899,7 @@ pmac_pcibios_fixup(void)
        pcibios_fixup_OF_interrupts();
 }
 
-int __pmac
+int
 pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
 {
        struct device_node* node;
@@ -1096,7 +1096,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
  * Disable second function on K2-SATA, it's broken
  * and disable IO BARs on first one
  */
-void __pmac pmac_pci_fixup_k2_sata(struct pci_dev* dev)
+void pmac_pci_fixup_k2_sata(struct pci_dev* dev)
 {
        int i;
        u16 cmd;
index 2ce0588..9f2d95e 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/open_pic.h>
 #include <asm/xmon.h>
 #include <asm/pmac_feature.h>
+#include <asm/machdep.h>
 
 #include "pmac_pic.h"
 
@@ -53,7 +54,7 @@ struct pmac_irq_hw {
 };
 
 /* Default addresses */
-static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = {
+static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
         (struct pmac_irq_hw *) 0xf3000020,
         (struct pmac_irq_hw *) 0xf3000010,
         (struct pmac_irq_hw *) 0xf4000020,
@@ -64,22 +65,22 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = {
 #define OHARE_LEVEL_MASK       0x1ff00000
 #define HEATHROW_LEVEL_MASK    0x1ff00000
 
-static int max_irqs __pmacdata;
-static int max_real_irqs __pmacdata;
-static u32 level_mask[4] __pmacdata;
+static int max_irqs;
+static int max_real_irqs;
+static u32 level_mask[4];
 
-static DEFINE_SPINLOCK(pmac_pic_lock __pmacdata);
+static DEFINE_SPINLOCK(pmac_pic_lock);
 
 
 #define GATWICK_IRQ_POOL_SIZE        10
-static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE] __pmacdata;
+static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
 
 /*
  * Mark an irq as "lost".  This is only used on the pmac
  * since it can lose interrupts (see pmac_set_irq_mask).
  * -- Cort
  */
-void __pmac
+void
 __set_lost(unsigned long irq_nr, int nokick)
 {
        if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
@@ -89,7 +90,7 @@ __set_lost(unsigned long irq_nr, int nokick)
        }
 }
 
-static void __pmac
+static void
 pmac_mask_and_ack_irq(unsigned int irq_nr)
 {
         unsigned long bit = 1UL << (irq_nr & 0x1f);
@@ -114,7 +115,7 @@ pmac_mask_and_ack_irq(unsigned int irq_nr)
        spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 
-static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
+static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
 {
         unsigned long bit = 1UL << (irq_nr & 0x1f);
         int i = irq_nr >> 5;
@@ -147,7 +148,7 @@ static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
 /* When an irq gets requested for the first client, if it's an
  * edge interrupt, we clear any previous one on the controller
  */
-static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
+static unsigned int pmac_startup_irq(unsigned int irq_nr)
 {
         unsigned long bit = 1UL << (irq_nr & 0x1f);
         int i = irq_nr >> 5;
@@ -160,20 +161,20 @@ static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
        return 0;
 }
 
-static void __pmac pmac_mask_irq(unsigned int irq_nr)
+static void pmac_mask_irq(unsigned int irq_nr)
 {
         clear_bit(irq_nr, ppc_cached_irq_mask);
         pmac_set_irq_mask(irq_nr, 0);
         mb();
 }
 
-static void __pmac pmac_unmask_irq(unsigned int irq_nr)
+static void pmac_unmask_irq(unsigned int irq_nr)
 {
         set_bit(irq_nr, ppc_cached_irq_mask);
         pmac_set_irq_mask(irq_nr, 0);
 }
 
-static void __pmac pmac_end_irq(unsigned int irq_nr)
+static void pmac_end_irq(unsigned int irq_nr)
 {
        if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
            && irq_desc[irq_nr].action) {
index d6356f4..55d2bef 100644 (file)
@@ -122,7 +122,7 @@ extern struct smp_ops_t psurge_smp_ops;
 extern struct smp_ops_t core99_smp_ops;
 #endif /* CONFIG_SMP */
 
-static int __pmac
+static int
 pmac_show_cpuinfo(struct seq_file *m)
 {
        struct device_node *np;
@@ -226,7 +226,7 @@ pmac_show_cpuinfo(struct seq_file *m)
        return 0;
 }
 
-static int __openfirmware
+static int
 pmac_show_percpuinfo(struct seq_file *m, int i)
 {
 #ifdef CONFIG_CPU_FREQ_PMAC
@@ -330,9 +330,9 @@ pmac_setup_arch(void)
 #ifdef CONFIG_SMP
        /* Check for Core99 */
        if (find_devices("uni-n") || find_devices("u3"))
-               ppc_md.smp_ops = &core99_smp_ops;
+               smp_ops = &core99_smp_ops;
        else
-               ppc_md.smp_ops = &psurge_smp_ops;
+               smp_ops = &psurge_smp_ops;
 #endif /* CONFIG_SMP */
 
        pci_create_OF_bus_map();
@@ -447,7 +447,7 @@ static int pmac_pm_enter(suspend_state_t state)
        enable_kernel_fp();
 
 #ifdef CONFIG_ALTIVEC
-       if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC)
+       if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
                enable_kernel_altivec();
 #endif /* CONFIG_ALTIVEC */
 
@@ -485,7 +485,7 @@ static int pmac_late_init(void)
 late_initcall(pmac_late_init);
 
 /* can't be __init - can be called whenever a disk is first accessed */
-void __pmac
+void
 note_bootable_part(dev_t dev, int part, int goodness)
 {
        static int found_boot = 0;
@@ -511,7 +511,7 @@ note_bootable_part(dev_t dev, int part, int goodness)
        }
 }
 
-static void __pmac
+static void
 pmac_restart(char *cmd)
 {
 #ifdef CONFIG_ADB_CUDA
@@ -536,7 +536,7 @@ pmac_restart(char *cmd)
        }
 }
 
-static void __pmac
+static void
 pmac_power_off(void)
 {
 #ifdef CONFIG_ADB_CUDA
@@ -561,7 +561,7 @@ pmac_power_off(void)
        }
 }
 
-static void __pmac
+static void
 pmac_halt(void)
 {
    pmac_power_off();
@@ -661,7 +661,6 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ppc_md.setup_arch     = pmac_setup_arch;
        ppc_md.show_cpuinfo   = pmac_show_cpuinfo;
        ppc_md.show_percpuinfo = pmac_show_percpuinfo;
-       ppc_md.irq_canonicalize = NULL;
        ppc_md.init_IRQ       = pmac_pic_init;
        ppc_md.get_irq        = pmac_get_irq; /* Changed later on ... */
 
index 88419c7..22b113d 100644 (file)
@@ -387,10 +387,10 @@ turn_on_mmu:
 #endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
 
        .section .data
-       .balign L1_CACHE_LINE_SIZE
+       .balign L1_CACHE_BYTES
 sleep_storage:
        .long 0
-       .balign L1_CACHE_LINE_SIZE, 0
+       .balign L1_CACHE_BYTES, 0
 
 #endif /* CONFIG_6xx */
        .section .text
index 794a239..26ff262 100644 (file)
@@ -186,7 +186,7 @@ static inline void psurge_clr_ipi(int cpu)
  */
 static unsigned long psurge_smp_message[NR_CPUS];
 
-void __pmac psurge_smp_message_recv(struct pt_regs *regs)
+void psurge_smp_message_recv(struct pt_regs *regs)
 {
        int cpu = smp_processor_id();
        int msg;
@@ -203,14 +203,13 @@ void __pmac psurge_smp_message_recv(struct pt_regs *regs)
                        smp_message_recv(msg, regs);
 }
 
-irqreturn_t __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
+irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
 {
        psurge_smp_message_recv(regs);
        return IRQ_HANDLED;
 }
 
-static void __pmac smp_psurge_message_pass(int target, int msg, unsigned long data,
-                                          int wait)
+static void smp_psurge_message_pass(int target, int msg)
 {
        int i;
 
@@ -629,7 +628,7 @@ void smp_core99_give_timebase(void)
 
 
 /* PowerSurge-style Macs */
-struct smp_ops_t psurge_smp_ops __pmacdata = {
+struct smp_ops_t psurge_smp_ops = {
        .message_pass   = smp_psurge_message_pass,
        .probe          = smp_psurge_probe,
        .kick_cpu       = smp_psurge_kick_cpu,
@@ -639,7 +638,7 @@ struct smp_ops_t psurge_smp_ops __pmacdata = {
 };
 
 /* Core99 Macs (dual G4s) */
-struct smp_ops_t core99_smp_ops __pmacdata = {
+struct smp_ops_t core99_smp_ops = {
        .message_pass   = smp_openpic_message_pass,
        .probe          = smp_core99_probe,
        .kick_cpu       = smp_core99_kick_cpu,
index efb819f..edb9fcc 100644 (file)
@@ -77,7 +77,7 @@ pmac_time_init(void)
 #endif
 }
 
-unsigned long __pmac
+unsigned long
 pmac_get_rtc_time(void)
 {
 #if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -118,7 +118,7 @@ pmac_get_rtc_time(void)
        return 0;
 }
 
-int __pmac
+int
 pmac_set_rtc_time(unsigned long nowtime)
 {
 #if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -210,7 +210,7 @@ via_calibrate_decr(void)
 /*
  * Reset the time after a sleep.
  */
-static int __pmac
+static int
 time_sleep_notify(struct pmu_sleep_notifier *self, int when)
 {
        static unsigned long time_diff;
@@ -235,7 +235,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
        return PBOOK_SLEEP_OK;
 }
 
-static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = {
+static struct pmu_sleep_notifier time_sleep_notifier = {
        time_sleep_notify, SLEEP_LEVEL_MISC,
 };
 #endif /* CONFIG_PM */
index e70aae2..59eb330 100644 (file)
@@ -646,14 +646,6 @@ static void pplus_power_off(void)
        pplus_halt();
 }
 
-static unsigned int pplus_irq_canonicalize(u_int irq)
-{
-       if (irq == 2)
-               return 9;
-       else
-               return irq;
-}
-
 static void __init pplus_init_IRQ(void)
 {
        int i;
@@ -872,10 +864,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ISA_DMA_THRESHOLD = 0x00ffffff;
        DMA_MODE_READ = 0x44;
        DMA_MODE_WRITE = 0x48;
+       ppc_do_canonicalize_irqs = 1;
 
        ppc_md.setup_arch = pplus_setup_arch;
        ppc_md.show_cpuinfo = pplus_show_cpuinfo;
-       ppc_md.irq_canonicalize = pplus_irq_canonicalize;
        ppc_md.init_IRQ = pplus_init_IRQ;
        /* this gets changed later on if we have an OpenPIC -- Cort */
        ppc_md.get_irq = i8259_irq;
@@ -911,6 +903,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ppc_md.kgdb_map_scc = gen550_kgdb_map_scc;
 #endif
 #ifdef CONFIG_SMP
-       ppc_md.smp_ops = &pplus_smp_ops;
+       smp_ops = &pplus_smp_ops;
 #endif                         /* CONFIG_SMP */
 }
index 4760cb6..e50b999 100644 (file)
@@ -43,7 +43,7 @@ static unsigned long  *ProcInfo;
 /* Tables for known hardware */
 
 /* Motorola PowerStackII - Utah */
-static char Utah_pci_IRQ_map[23] __prepdata =
+static char Utah_pci_IRQ_map[23] =
 {
         0,   /* Slot 0  - unused */
         0,   /* Slot 1  - unused */
@@ -72,7 +72,7 @@ static char Utah_pci_IRQ_map[23] __prepdata =
         0,   /* Slot 22 - unused */
 };
 
-static char Utah_pci_IRQ_routes[] __prepdata =
+static char Utah_pci_IRQ_routes[] =
 {
         0,   /* Line 0 - Unused */
         9,   /* Line 1 */
@@ -84,7 +84,7 @@ static char Utah_pci_IRQ_routes[] __prepdata =
 
 /* Motorola PowerStackII - Omaha */
 /* no integrated SCSI or ethernet */
-static char Omaha_pci_IRQ_map[23] __prepdata =
+static char Omaha_pci_IRQ_map[23] =
 {
         0,   /* Slot 0  - unused */
         0,   /* Slot 1  - unused */
@@ -111,7 +111,7 @@ static char Omaha_pci_IRQ_map[23] __prepdata =
         0,
 };
 
-static char Omaha_pci_IRQ_routes[] __prepdata =
+static char Omaha_pci_IRQ_routes[] =
 {
         0,   /* Line 0 - Unused */
         9,   /* Line 1 */
@@ -121,7 +121,7 @@ static char Omaha_pci_IRQ_routes[] __prepdata =
 };
 
 /* Motorola PowerStack */
-static char Blackhawk_pci_IRQ_map[19] __prepdata =
+static char Blackhawk_pci_IRQ_map[19] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -144,7 +144,7 @@ static char Blackhawk_pci_IRQ_map[19] __prepdata =
        3,      /* Slot P5 */
 };
 
-static char Blackhawk_pci_IRQ_routes[] __prepdata =
+static char Blackhawk_pci_IRQ_routes[] =
 {
        0,      /* Line 0 - Unused */
        9,      /* Line 1 */
@@ -154,7 +154,7 @@ static char Blackhawk_pci_IRQ_routes[] __prepdata =
 };
 
 /* Motorola Mesquite */
-static char Mesquite_pci_IRQ_map[23] __prepdata =
+static char Mesquite_pci_IRQ_map[23] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -182,7 +182,7 @@ static char Mesquite_pci_IRQ_map[23] __prepdata =
 };
 
 /* Motorola Sitka */
-static char Sitka_pci_IRQ_map[21] __prepdata =
+static char Sitka_pci_IRQ_map[21] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -208,7 +208,7 @@ static char Sitka_pci_IRQ_map[21] __prepdata =
 };
 
 /* Motorola MTX */
-static char MTX_pci_IRQ_map[23] __prepdata =
+static char MTX_pci_IRQ_map[23] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -237,7 +237,7 @@ static char MTX_pci_IRQ_map[23] __prepdata =
 
 /* Motorola MTX Plus */
 /* Secondary bus interrupt routing is not supported yet */
-static char MTXplus_pci_IRQ_map[23] __prepdata =
+static char MTXplus_pci_IRQ_map[23] =
 {
         0,      /* Slot 0  - unused */
         0,      /* Slot 1  - unused */
@@ -264,13 +264,13 @@ static char MTXplus_pci_IRQ_map[23] __prepdata =
         0,      /* Slot 22 - unused */
 };
 
-static char Raven_pci_IRQ_routes[] __prepdata =
+static char Raven_pci_IRQ_routes[] =
 {
        0,      /* This is a dummy structure */
 };
 
 /* Motorola MVME16xx */
-static char Genesis_pci_IRQ_map[16] __prepdata =
+static char Genesis_pci_IRQ_map[16] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -290,7 +290,7 @@ static char Genesis_pci_IRQ_map[16] __prepdata =
        0,      /* Slot 15 - unused */
 };
 
-static char Genesis_pci_IRQ_routes[] __prepdata =
+static char Genesis_pci_IRQ_routes[] =
 {
        0,      /* Line 0 - Unused */
        10,     /* Line 1 */
@@ -299,7 +299,7 @@ static char Genesis_pci_IRQ_routes[] __prepdata =
        15      /* Line 4 */
 };
 
-static char Genesis2_pci_IRQ_map[23] __prepdata =
+static char Genesis2_pci_IRQ_map[23] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -327,7 +327,7 @@ static char Genesis2_pci_IRQ_map[23] __prepdata =
 };
 
 /* Motorola Series-E */
-static char Comet_pci_IRQ_map[23] __prepdata =
+static char Comet_pci_IRQ_map[23] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -354,7 +354,7 @@ static char Comet_pci_IRQ_map[23] __prepdata =
        0,
 };
 
-static char Comet_pci_IRQ_routes[] __prepdata =
+static char Comet_pci_IRQ_routes[] =
 {
        0,      /* Line 0 - Unused */
        10,     /* Line 1 */
@@ -364,7 +364,7 @@ static char Comet_pci_IRQ_routes[] __prepdata =
 };
 
 /* Motorola Series-EX */
-static char Comet2_pci_IRQ_map[23] __prepdata =
+static char Comet2_pci_IRQ_map[23] =
 {
        0,      /* Slot 0  - unused */
        0,      /* Slot 1  - unused */
@@ -391,7 +391,7 @@ static char Comet2_pci_IRQ_map[23] __prepdata =
        0,
 };
 
-static char Comet2_pci_IRQ_routes[] __prepdata =
+static char Comet2_pci_IRQ_routes[] =
 {
        0,      /* Line 0 - Unused */
        10,     /* Line 1 */
@@ -405,7 +405,7 @@ static char Comet2_pci_IRQ_routes[] __prepdata =
  * This is actually based on the Carolina motherboard
  * -- Cort
  */
-static char ibm8xx_pci_IRQ_map[23] __prepdata = {
+static char ibm8xx_pci_IRQ_map[23] = {
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -431,7 +431,7 @@ static char ibm8xx_pci_IRQ_map[23] __prepdata = {
         2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
 };
 
-static char ibm8xx_pci_IRQ_routes[] __prepdata = {
+static char ibm8xx_pci_IRQ_routes[] = {
         0,      /* Line 0 - unused */
         15,     /* Line 1 */
         15,     /* Line 2 */
@@ -443,7 +443,7 @@ static char ibm8xx_pci_IRQ_routes[] __prepdata = {
  * a 6015 ibm board
  * -- Cort
  */
-static char ibm6015_pci_IRQ_map[23] __prepdata = {
+static char ibm6015_pci_IRQ_map[23] = {
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -469,7 +469,7 @@ static char ibm6015_pci_IRQ_map[23] __prepdata = {
         2, /* Slot 22 -  */
 };
 
-static char ibm6015_pci_IRQ_routes[] __prepdata = {
+static char ibm6015_pci_IRQ_routes[] = {
         0,      /* Line 0 - unused */
         13,     /* Line 1 */
         15,     /* Line 2 */
@@ -479,7 +479,7 @@ static char ibm6015_pci_IRQ_routes[] __prepdata = {
 
 
 /* IBM Nobis and Thinkpad 850 */
-static char Nobis_pci_IRQ_map[23] __prepdata ={
+static char Nobis_pci_IRQ_map[23] ={
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -498,7 +498,7 @@ static char Nobis_pci_IRQ_map[23] __prepdata ={
         0, /* Slot 15 - unused */
 };
 
-static char Nobis_pci_IRQ_routes[] __prepdata = {
+static char Nobis_pci_IRQ_routes[] = {
         0, /* Line 0 - Unused */
         13, /* Line 1 */
         13, /* Line 2 */
@@ -510,7 +510,7 @@ static char Nobis_pci_IRQ_routes[] __prepdata = {
  * IBM RS/6000 43p/140  -- paulus
  * XXX we should get all this from the residual data
  */
-static char ibm43p_pci_IRQ_map[23] __prepdata = {
+static char ibm43p_pci_IRQ_map[23] = {
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -536,7 +536,7 @@ static char ibm43p_pci_IRQ_map[23] __prepdata = {
         1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
 };
 
-static char ibm43p_pci_IRQ_routes[] __prepdata = {
+static char ibm43p_pci_IRQ_routes[] = {
         0,      /* Line 0 - unused */
         15,     /* Line 1 */
         15,     /* Line 2 */
@@ -559,7 +559,7 @@ struct powerplus_irq_list
  * are routed to OpenPIC inputs 5-8.  These values are offset by
  * 16 in the table to reflect the Linux kernel interrupt value.
  */
-struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata =
+struct powerplus_irq_list Powerplus_pci_IRQ_list =
 {
        {25, 26, 27, 28},
        {21, 22, 23, 24}
@@ -572,7 +572,7 @@ struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata =
  * are routed to OpenPIC inputs 12-15. These values are offset by
  * 16 in the table to reflect the Linux kernel interrupt value.
  */
-struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata =
+struct powerplus_irq_list Mesquite_pci_IRQ_list =
 {
        {24, 25, 26, 27},
        {28, 29, 30, 31}
@@ -582,7 +582,7 @@ struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata =
  * This table represents the standard PCI swizzle defined in the
  * PCI bus specification.
  */
-static unsigned char prep_pci_intpins[4][4] __prepdata =
+static unsigned char prep_pci_intpins[4][4] =
 {
        { 1, 2, 3, 4},  /* Buses 0, 4, 8, ... */
        { 2, 3, 4, 1},  /* Buses 1, 5, 9, ... */
@@ -622,7 +622,7 @@ static unsigned char prep_pci_intpins[4][4] __prepdata =
 #define MIN_DEVNR      11
 #define MAX_DEVNR      22
 
-static int __prep
+static int
 prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
                 int len, u32 *val)
 {
@@ -652,7 +652,7 @@ prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
        return PCIBIOS_SUCCESSFUL;
 }
 
-static int __prep
+static int
 prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
                  int len, u32 val)
 {
@@ -804,7 +804,7 @@ struct mot_info {
        void            (*map_non0_bus)(struct pci_dev *);      /* For boards with more than bus 0 devices. */
        struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */
        unsigned char   secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */
-} mot_info[] __prepdata = {
+} mot_info[] = {
        {0x300, 0x00, 0x00, "MVME 2400",                        Genesis2_pci_IRQ_map,   Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF},
        {0x010, 0x00, 0x00, "Genesis",                          Genesis_pci_IRQ_map,    Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00},
        {0x020, 0x00, 0x00, "Powerstack (Series E)",            Comet_pci_IRQ_map,      Comet_pci_IRQ_routes, NULL, NULL, 0x00},
index bc926be..9e5637e 100644 (file)
@@ -89,9 +89,6 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
 #define cached_21      (((char *)(ppc_cached_irq_mask))[3])
 #define cached_A1      (((char *)(ppc_cached_irq_mask))[2])
 
-/* for the mac fs */
-dev_t boot_dev;
-
 #ifdef CONFIG_SOUND_CS4232
 long ppc_cs4232_dma, ppc_cs4232_dma2;
 #endif
@@ -173,7 +170,7 @@ prep_carolina_enable_l2(void)
 }
 
 /* cpuinfo code common to all IBM PReP */
-static void __prep
+static void
 prep_ibm_cpuinfo(struct seq_file *m)
 {
        unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -209,14 +206,14 @@ prep_ibm_cpuinfo(struct seq_file *m)
        }
 }
 
-static int __prep
+static int
 prep_gen_cpuinfo(struct seq_file *m)
 {
        prep_ibm_cpuinfo(m);
        return 0;
 }
 
-static int __prep
+static int
 prep_sandalfoot_cpuinfo(struct seq_file *m)
 {
        unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -243,7 +240,7 @@ prep_sandalfoot_cpuinfo(struct seq_file *m)
        return 0;
 }
 
-static int __prep
+static int
 prep_thinkpad_cpuinfo(struct seq_file *m)
 {
        unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -314,7 +311,7 @@ prep_thinkpad_cpuinfo(struct seq_file *m)
        return 0;
 }
 
-static int __prep
+static int
 prep_carolina_cpuinfo(struct seq_file *m)
 {
        unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -350,7 +347,7 @@ prep_carolina_cpuinfo(struct seq_file *m)
        return 0;
 }
 
-static int __prep
+static int
 prep_tiger1_cpuinfo(struct seq_file *m)
 {
        unsigned int l2_reg = inb(PREP_IBM_L2INFO);
@@ -393,7 +390,7 @@ prep_tiger1_cpuinfo(struct seq_file *m)
 
 
 /* Used by all Motorola PReP */
-static int __prep
+static int
 prep_mot_cpuinfo(struct seq_file *m)
 {
        unsigned int cachew = *((unsigned char *)CACHECRBA);
@@ -454,7 +451,7 @@ no_l2:
        return 0;
 }
 
-static void __prep
+static void
 prep_restart(char *cmd)
 {
 #define PREP_SP92      0x92    /* Special Port 92 */
@@ -473,7 +470,7 @@ prep_restart(char *cmd)
 #undef PREP_SP92
 }
 
-static void __prep
+static void
 prep_halt(void)
 {
        local_irq_disable(); /* no interrupts */
@@ -488,7 +485,7 @@ prep_halt(void)
 /* Carrera is the power manager in the Thinkpads. Unfortunately not much is
  * known about it, so we can't power down.
  */
-static void __prep
+static void
 prep_carrera_poweroff(void)
 {
        prep_halt();
@@ -501,7 +498,7 @@ prep_carrera_poweroff(void)
  * somewhat in the IBM Carolina Technical Specification.
  * -Hollis
  */
-static void __prep
+static void
 utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
 {
        /*
@@ -539,7 +536,7 @@ utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
        udelay(100);                            /* important: let controller recover */
 }
 
-static void __prep
+static void
 prep_sig750_poweroff(void)
 {
        /* tweak the power manager found in most IBM PRePs (except Thinkpads) */
@@ -554,7 +551,7 @@ prep_sig750_poweroff(void)
        /* not reached */
 }
 
-static int __prep
+static int
 prep_show_percpuinfo(struct seq_file *m, int i)
 {
        /* PREP's without residual data will give incorrect values here */
@@ -700,12 +697,12 @@ prep_set_bat(void)
 /*
  * IBM 3-digit status LED
  */
-static unsigned int ibm_statusled_base __prepdata;
+static unsigned int ibm_statusled_base;
 
-static void __prep
+static void
 ibm_statusled_progress(char *s, unsigned short hex);
 
-static int __prep
+static int
 ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
                    void * dummy3)
 {
@@ -713,13 +710,13 @@ ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block ibm_statusled_block __prepdata = {
+static struct notifier_block ibm_statusled_block = {
        ibm_statusled_panic,
        NULL,
        INT_MAX /* try to do it first */
 };
 
-static void __prep
+static void
 ibm_statusled_progress(char *s, unsigned short hex)
 {
        static int notifier_installed;
@@ -945,19 +942,6 @@ prep_calibrate_decr(void)
                todc_calibrate_decr();
 }
 
-static unsigned int __prep
-prep_irq_canonicalize(u_int irq)
-{
-       if (irq == 2)
-       {
-               return 9;
-       }
-       else
-       {
-               return irq;
-       }
-}
-
 static void __init
 prep_init_IRQ(void)
 {
@@ -996,7 +980,7 @@ prep_init_IRQ(void)
 /*
  * IDE stuff.
  */
-static int __prep
+static int
 prep_ide_default_irq(unsigned long base)
 {
        switch (base) {
@@ -1010,7 +994,7 @@ prep_ide_default_irq(unsigned long base)
        }
 }
 
-static unsigned long __prep
+static unsigned long
 prep_ide_default_io_base(int index)
 {
        switch (index) {
@@ -1055,7 +1039,7 @@ smp_prep_setup_cpu(int cpu_nr)
                do_openpic_setup_cpu();
 }
 
-static struct smp_ops_t prep_smp_ops __prepdata = {
+static struct smp_ops_t prep_smp_ops = {
        smp_openpic_message_pass,
        smp_prep_probe,
        smp_prep_kick_cpu,
@@ -1113,6 +1097,7 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ISA_DMA_THRESHOLD = 0x00ffffff;
        DMA_MODE_READ = 0x44;
        DMA_MODE_WRITE = 0x48;
+       ppc_do_canonicalize_irqs = 1;
 
        /* figure out what kind of prep workstation we are */
        if (have_residual_data) {
@@ -1139,7 +1124,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ppc_md.setup_arch     = prep_setup_arch;
        ppc_md.show_percpuinfo = prep_show_percpuinfo;
        ppc_md.show_cpuinfo   = NULL; /* set in prep_setup_arch() */
-       ppc_md.irq_canonicalize = prep_irq_canonicalize;
        ppc_md.init_IRQ       = prep_init_IRQ;
        /* this gets changed later on if we have an OpenPIC -- Cort */
        ppc_md.get_irq        = i8259_irq;
@@ -1176,6 +1160,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
 #endif
 
 #ifdef CONFIG_SMP
-       ppc_md.smp_ops           = &prep_smp_ops;
+       smp_ops                  = &prep_smp_ops;
 #endif /* CONFIG_SMP */
 }
index 0376c8c..5058568 100644 (file)
@@ -1183,18 +1183,18 @@ static void __init ppc7d_setup_arch(void)
                ROOT_DEV = Root_HDA1;
 #endif
 
-       if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_SPEC7450) ||
-           (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR))
+       if ((cur_cpu_spec->cpu_features & CPU_FTR_SPEC7450) ||
+           (cur_cpu_spec->cpu_features & CPU_FTR_L3CR))
                /* 745x is different.  We only want to pass along enable. */
                _set_L2CR(L2CR_L2E);
-       else if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR)
+       else if (cur_cpu_spec->cpu_features & CPU_FTR_L2CR)
                /* All modules have 1MB of L2.  We also assume that an
                 * L2 divisor of 3 will work.
                 */
                _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3
                          | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF);
 
-       if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR)
+       if (cur_cpu_spec->cpu_features & CPU_FTR_L3CR)
                /* No L3 cache */
                _set_L3CR(0);
 
@@ -1424,6 +1424,7 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ppc_md.setup_arch = ppc7d_setup_arch;
        ppc_md.init = ppc7d_init2;
        ppc_md.show_cpuinfo = ppc7d_show_cpuinfo;
+       /* XXX this is broken... */
        ppc_md.irq_canonicalize = ppc7d_irq_canonicalize;
        ppc_md.init_IRQ = ppc7d_init_irq;
        ppc_md.get_irq = ppc7d_get_irq;
index 0f84ca6..c991160 100644 (file)
@@ -47,7 +47,7 @@
 #include <asm/ide.h>
 
 
-unsigned char __res[sizeof(RESIDUAL)] __prepdata = {0,};
+unsigned char __res[sizeof(RESIDUAL)] = {0,};
 RESIDUAL *res = (RESIDUAL *)&__res;
 
 char * PnP_BASE_TYPES[] __initdata = {
index 5232283..d4c9781 100644 (file)
@@ -508,15 +508,6 @@ sandpoint_init_IRQ(void)
        i8259_init(0xfef00000);
 }
 
-static u32
-sandpoint_irq_canonicalize(u32 irq)
-{
-       if (irq == 2)
-               return 9;
-       else
-               return irq;
-}
-
 static unsigned long __init
 sandpoint_find_end_of_memory(void)
 {
@@ -727,10 +718,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
        ISA_DMA_THRESHOLD = 0x00ffffff;
        DMA_MODE_READ = 0x44;
        DMA_MODE_WRITE = 0x48;
+       ppc_do_canonicalize_irqs = 1;
 
        ppc_md.setup_arch = sandpoint_setup_arch;
        ppc_md.show_cpuinfo = sandpoint_show_cpuinfo;
-       ppc_md.irq_canonicalize = sandpoint_irq_canonicalize;
        ppc_md.init_IRQ = sandpoint_init_IRQ;
        ppc_md.get_irq = openpic_get_irq;
 
index b8d08f3..f6a2f19 100644 (file)
@@ -39,7 +39,7 @@ obj-$(CONFIG_8xx)             += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \
 ifeq ($(CONFIG_8xx),y)
 obj-$(CONFIG_PCI)              += qspan_pci.o i8259.o
 endif
-obj-$(CONFIG_PPC_OF)           += prom_init.o prom.o of_device.o
+obj-$(CONFIG_PPC_OF)           += prom_init.o prom.o
 obj-$(CONFIG_PPC_PMAC)         += open_pic.o indirect_pci.o
 obj-$(CONFIG_POWER4)           += open_pic2.o
 obj-$(CONFIG_PPC_CHRP)         += open_pic.o indirect_pci.o i8259.o
index 7734f68..12fa83e 100644 (file)
@@ -53,8 +53,8 @@ extern char *klimit;
  * chrp only uses it during early boot.
  */
 #ifdef CONFIG_XMON
-#define BTEXT  __pmac
-#define BTDATA __pmacdata
+#define BTEXT
+#define BTDATA
 #else
 #define BTEXT  __init
 #define BTDATA __initdata
@@ -187,7 +187,7 @@ btext_setup_display(int width, int height, int depth, int pitch,
  *    changes.
  */
 
-void __openfirmware
+void
 map_boot_text(void)
 {
        unsigned long base, offset, size;
index 44aa873..f97b3a9 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/mv64x60.h>
+#include <asm/machdep.h>
 
 #define CPU_INTR_STR   "gt64260 cpu interface error"
 #define PCI0_INTR_STR  "gt64260 pci 0 error"
index 0bb9198..c36db27 100644 (file)
@@ -236,9 +236,9 @@ void __init ibm440gx_l2c_setup(struct ibm44x_clocks* p)
        /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
           enable it on all other revisions
         */
-       if (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. A") == 0 ||
-                       strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. B") == 0
-                       || (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. C")
+       if (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. A") == 0 ||
+                       strcmp(cur_cpu_spec->cpu_name, "440GX Rev. B") == 0
+                       || (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. C")
                                == 0 && p->cpu > 667000000))
                ibm440gx_l2c_disable();
        else
index 7612e06..95e11f9 100644 (file)
@@ -178,7 +178,7 @@ void __init ibm44x_platform_init(void)
 #endif
 }
 
-/* Called from MachineCheckException */
+/* Called from machine_check_exception */
 void platform_machine_check(struct pt_regs *regs)
 {
        printk("PLB0: BEAR=0x%08x%08x ACR=  0x%08x BESR= 0x%08x\n",
index 9db58c5..5cce123 100644 (file)
@@ -302,7 +302,7 @@ pq2ads_setup_pci(struct pci_controller *hose)
 
 void __init pq2_find_bridges(void)
 {
-       extern int pci_assign_all_busses;
+       extern int pci_assign_all_buses;
        struct pci_controller * hose;
        int host_bridge;
 
index 4c888da..c88e2d4 100644 (file)
@@ -406,7 +406,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
 
        ppc_md.setup_arch               = m8xx_setup_arch;
        ppc_md.show_percpuinfo          = m8xx_show_percpuinfo;
-       ppc_md.irq_canonicalize = NULL;
        ppc_md.init_IRQ                 = m8xx_init_IRQ;
        ppc_md.get_irq                  = m8xx_get_irq;
        ppc_md.init                     = NULL;
index 59cf3e8..4ac1908 100644 (file)
@@ -21,6 +21,7 @@
 #include "mpc52xx_pci.h"
 
 #include <asm/delay.h>
+#include <asm/machdep.h>
 
 
 static int
@@ -181,7 +182,7 @@ mpc52xx_find_bridges(void)
        struct mpc52xx_pci __iomem *pci_regs;
        struct pci_controller *hose;
 
-       pci_assign_all_busses = 1;
+       pci_assign_all_buses = 1;
 
        pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE);
        if (!pci_regs)
index 95b3b8a..dbf8aca 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/mpc83xx.h>
 #include <asm/irq.h>
 #include <asm/ppc_sys.h>
+#include <asm/machdep.h>
 
 /* We use offsets for IORESOURCE_MEM since we do not know at compile time
  * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup
index 8356da4..58b0aa8 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/mv64x60.h>
+#include <asm/machdep.h>
 
 #ifdef CONFIG_IRQ_ALL_CPUS
 #error "The mv64360 does not support distribution of IRQs on all CPUs"
index 4849850..a781c50 100644 (file)
@@ -1304,7 +1304,7 @@ mv64x60_config_pci_params(struct pci_controller *hose,
        early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
 
        /* Set latency timer, cache line size, clear BIST */
-       u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2);
+       u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2);
        early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
 
        mv64x60_pci_exclude_bridge = save_exclude;
index 2927c7a..fa5b2e4 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <asm/delay.h>
 #include <asm/mv64x60.h>
+#include <asm/machdep.h>
 
 
 #if defined(CONFIG_SERIAL_TEXT_DEBUG)
diff --git a/arch/ppc/syslib/of_device.c b/arch/ppc/syslib/of_device.c
deleted file mode 100644 (file)
index 93c7231..0000000
+++ /dev/null
@@ -1,276 +0,0 @@
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <asm/errno.h>
-#include <asm/of_device.h>
-
-/**
- * of_match_device - Tell if an of_device structure has a matching
- * of_match structure
- * @ids: array of of device match structures to search in
- * @dev: the of device structure to match against
- *
- * Used by a driver to check whether an of_device present in the
- * system is in its list of supported devices.
- */
-const struct of_device_id * of_match_device(const struct of_device_id *matches,
-                                       const struct of_device *dev)
-{
-       if (!dev->node)
-               return NULL;
-       while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
-               int match = 1;
-               if (matches->name[0])
-                       match &= dev->node->name
-                               && !strcmp(matches->name, dev->node->name);
-               if (matches->type[0])
-                       match &= dev->node->type
-                               && !strcmp(matches->type, dev->node->type);
-               if (matches->compatible[0])
-                       match &= device_is_compatible(dev->node,
-                               matches->compatible);
-               if (match)
-                       return matches;
-               matches++;
-       }
-       return NULL;
-}
-
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * of_drv = to_of_platform_driver(drv);
-       const struct of_device_id * matches = of_drv->match_table;
-
-       if (!matches)
-               return 0;
-
-       return of_match_device(matches, of_dev) != NULL;
-}
-
-struct of_device *of_dev_get(struct of_device *dev)
-{
-       struct device *tmp;
-
-       if (!dev)
-               return NULL;
-       tmp = get_device(&dev->dev);
-       if (tmp)
-               return to_of_device(tmp);
-       else
-               return NULL;
-}
-
-void of_dev_put(struct of_device *dev)
-{
-       if (dev)
-               put_device(&dev->dev);
-}
-
-
-static int of_device_probe(struct device *dev)
-{
-       int error = -ENODEV;
-       struct of_platform_driver *drv;
-       struct of_device *of_dev;
-       const struct of_device_id *match;
-
-       drv = to_of_platform_driver(dev->driver);
-       of_dev = to_of_device(dev);
-
-       if (!drv->probe)
-               return error;
-
-       of_dev_get(of_dev);
-
-       match = of_match_device(drv->match_table, of_dev);
-       if (match)
-               error = drv->probe(of_dev, match);
-       if (error)
-               of_dev_put(of_dev);
-
-       return error;
-}
-
-static int of_device_remove(struct device *dev)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-
-       if (dev->driver && drv->remove)
-               drv->remove(of_dev);
-       return 0;
-}
-
-static int of_device_suspend(struct device *dev, pm_message_t state)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->suspend)
-               error = drv->suspend(of_dev, state);
-       return error;
-}
-
-static int of_device_resume(struct device * dev)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->resume)
-               error = drv->resume(of_dev);
-       return error;
-}
-
-struct bus_type of_platform_bus_type = {
-       .name   = "of_platform",
-       .match  = of_platform_bus_match,
-       .suspend        = of_device_suspend,
-       .resume = of_device_resume,
-};
-
-static int __init of_bus_driver_init(void)
-{
-       return bus_register(&of_platform_bus_type);
-}
-
-postcore_initcall(of_bus_driver_init);
-
-int of_register_driver(struct of_platform_driver *drv)
-{
-       int count = 0;
-
-       /* initialize common driver fields */
-       drv->driver.name = drv->name;
-       drv->driver.bus = &of_platform_bus_type;
-       drv->driver.probe = of_device_probe;
-       drv->driver.remove = of_device_remove;
-
-       /* register with core */
-       count = driver_register(&drv->driver);
-       return count ? count : 1;
-}
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
-       driver_unregister(&drv->driver);
-}
-
-
-static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct of_device *ofdev;
-
-       ofdev = to_of_device(dev);
-       return sprintf(buf, "%s", ofdev->node->full_name);
-}
-
-static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
-
-/**
- * of_release_dev - free an of device structure when all users of it are finished.
- * @dev: device that's been disconnected
- *
- * Will be called only by the device core when all users of this of device are
- * done.
- */
-void of_release_dev(struct device *dev)
-{
-       struct of_device *ofdev;
-
-        ofdev = to_of_device(dev);
-       of_node_put(ofdev->node);
-       kfree(ofdev);
-}
-
-int of_device_register(struct of_device *ofdev)
-{
-       int rc;
-       struct of_device **odprop;
-
-       BUG_ON(ofdev->node == NULL);
-
-       odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
-       if (!odprop) {
-               struct property *new_prop;
-       
-               new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *),
-                       GFP_KERNEL);
-               if (new_prop == NULL)
-                       return -ENOMEM;
-               new_prop->name = "linux,device";
-               new_prop->length = sizeof(sizeof(struct of_device *));
-               new_prop->value = (unsigned char *)&new_prop[1];
-               odprop = (struct of_device **)new_prop->value;
-               *odprop = NULL;
-               prom_add_property(ofdev->node, new_prop);
-       }
-       *odprop = ofdev;
-
-       rc = device_register(&ofdev->dev);
-       if (rc)
-               return rc;
-
-       device_create_file(&ofdev->dev, &dev_attr_devspec);
-
-       return 0;
-}
-
-void of_device_unregister(struct of_device *ofdev)
-{
-       struct of_device **odprop;
-
-       device_remove_file(&ofdev->dev, &dev_attr_devspec);
-
-       odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
-       if (odprop)
-               *odprop = NULL;
-
-       device_unregister(&ofdev->dev);
-}
-
-struct of_device* of_platform_device_create(struct device_node *np,
-                                           const char *bus_id,
-                                           struct device *parent)
-{
-       struct of_device *dev;
-       u32 *reg;
-
-       dev = kmalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return NULL;
-       memset(dev, 0, sizeof(*dev));
-
-       dev->node = of_node_get(np);
-       dev->dma_mask = 0xffffffffUL;
-       dev->dev.dma_mask = &dev->dma_mask;
-       dev->dev.parent = parent;
-       dev->dev.bus = &of_platform_bus_type;
-       dev->dev.release = of_release_dev;
-
-       reg = (u32 *)get_property(np, "reg", NULL);
-       strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
-
-       if (of_device_register(dev) != 0) {
-               kfree(dev);
-               return NULL;
-       }
-
-       return dev;
-}
-
-EXPORT_SYMBOL(of_match_device);
-EXPORT_SYMBOL(of_platform_bus_type);
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
-EXPORT_SYMBOL(of_device_register);
-EXPORT_SYMBOL(of_device_unregister);
-EXPORT_SYMBOL(of_dev_get);
-EXPORT_SYMBOL(of_dev_put);
-EXPORT_SYMBOL(of_platform_device_create);
-EXPORT_SYMBOL(of_release_dev);
index 1cf5de2..8947797 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/sections.h>
 #include <asm/open_pic.h>
 #include <asm/i8259.h>
+#include <asm/machdep.h>
 
 #include "open_pic_defs.h"
 
@@ -889,7 +890,7 @@ openpic_get_irq(struct pt_regs *regs)
 
 #ifdef CONFIG_SMP
 void
-smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
+smp_openpic_message_pass(int target, int msg)
 {
        cpumask_t mask = CPU_MASK_ALL;
        /* make sure we're sending something that translates to an IPI */
index 16cff91..1c40049 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/sections.h>
 #include <asm/open_pic.h>
 #include <asm/i8259.h>
+#include <asm/machdep.h>
 
 #include "open_pic_defs.h"
 
index ce4d1de..c46043c 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/ppc4xx_pic.h>
+#include <asm/machdep.h>
 
 /* Function Prototypes */
 
index 4008621..0b43563 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/ppc4xx_pic.h>
+#include <asm/machdep.h>
 
 /* See comment in include/arch-ppc/ppc4xx_pic.h
  * for more info about these two variables
index bf83240..e83a83f 100644 (file)
@@ -278,7 +278,7 @@ ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5,
 #endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */
 }
 
-/* Called from MachineCheckException */
+/* Called from machine_check_exception */
 void platform_machine_check(struct pt_regs *regs)
 {
 #if defined(DCRN_PLB0_BEAR)
index 890484e..4da168a 100644 (file)
@@ -40,6 +40,7 @@
 #include <asm/ppc_sys.h>
 #include <asm/kgdb.h>
 #include <asm/delay.h>
+#include <asm/machdep.h>
 
 #include <syslib/ppc83xx_setup.h>
 #if defined(CONFIG_PCI)
index 832b8bf..de2f905 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/mmu.h>
 #include <asm/ppc_sys.h>
 #include <asm/kgdb.h>
+#include <asm/machdep.h>
 
 #include <syslib/ppc85xx_setup.h>
 
index 1d38697..6f88ba9 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/cpm2.h>
 #include <asm/irq.h>
 #include <asm/ppc_sys.h>
+#include <asm/machdep.h>
 
 struct platform_device ppc_sys_platform_devices[] = {
        [MPC82xx_CPM_FCC1] = {
index 8599850..2c6364d 100644 (file)
 static char nvramData[MAX_PREP_NVRAM];
 static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0];
 
-unsigned char __prep prep_nvram_read_val(int addr)
+unsigned char prep_nvram_read_val(int addr)
 {
        outb(addr, PREP_NVRAM_AS0);
        outb(addr>>8, PREP_NVRAM_AS1);
        return inb(PREP_NVRAM_DATA);
 }
 
-void __prep prep_nvram_write_val(int           addr,
+void prep_nvram_write_val(int           addr,
                          unsigned char val)
 {
        outb(addr, PREP_NVRAM_AS0);
@@ -81,8 +81,7 @@ void __init init_prep_nvram(void)
        }
 }
 
-__prep
-char __prep *prep_nvram_get_var(const char *name)
+char *prep_nvram_get_var(const char *name)
 {
        char *cp;
        int  namelen;
@@ -101,8 +100,7 @@ char __prep *prep_nvram_get_var(const char *name)
        return NULL;
 }
 
-__prep
-char __prep *prep_nvram_first_var(void)
+char *prep_nvram_first_var(void)
 {
         if (nvram->Header.GELength == 0) {
                return NULL;
@@ -112,8 +110,7 @@ char __prep *prep_nvram_first_var(void)
        }
 }
 
-__prep
-char __prep *prep_nvram_next_var(char *name)
+char *prep_nvram_next_var(char *name)
 {
        char *cp;
 
index 2c64ed6..278da6e 100644 (file)
@@ -89,7 +89,7 @@ extern char cmd_line[512];    /* XXX */
 extern boot_infos_t *boot_infos;
 unsigned long dev_tree_size;
 
-void __openfirmware
+void
 phys_call_rtas(int service, int nargs, int nret, ...)
 {
        va_list list;
@@ -862,7 +862,7 @@ find_type_devices(const char *type)
 /*
  * Returns all nodes linked together
  */
-struct device_node * __openfirmware
+struct device_node *
 find_all_nodes(void)
 {
        struct device_node *head, **prevp, *np;
@@ -1165,7 +1165,7 @@ get_property(struct device_node *np, const char *name, int *lenp)
 /*
  * Add a property to a node
  */
-void __openfirmware
+void
 prom_add_property(struct device_node* np, struct property* prop)
 {
        struct property **next = &np->properties;
@@ -1177,7 +1177,7 @@ prom_add_property(struct device_node* np, struct property* prop)
 }
 
 /* I quickly hacked that one, check against spec ! */
-static inline unsigned long __openfirmware
+static inline unsigned long
 bus_space_to_resource_flags(unsigned int bus_space)
 {
        u8 space = (bus_space >> 24) & 0xf;
@@ -1194,7 +1194,7 @@ bus_space_to_resource_flags(unsigned int bus_space)
        }
 }
 
-static struct resource* __openfirmware
+static struct resource*
 find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
 {
        unsigned long mask;
@@ -1224,7 +1224,7 @@ find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
  * or other nodes attached to the root node. Ultimately, put some
  * link to resources in the OF node.
  */
-struct resource* __openfirmware
+struct resource*
 request_OF_resource(struct device_node* node, int index, const char* name_postfix)
 {
        struct pci_dev* pcidev;
@@ -1280,7 +1280,7 @@ fail:
        return NULL;
 }
 
-int __openfirmware
+int
 release_OF_resource(struct device_node* node, int index)
 {
        struct pci_dev* pcidev;
@@ -1346,7 +1346,7 @@ release_OF_resource(struct device_node* node, int index)
 }
 
 #if 0
-void __openfirmware
+void
 print_properties(struct device_node *np)
 {
        struct property *pp;
@@ -1400,7 +1400,7 @@ print_properties(struct device_node *np)
 static DEFINE_SPINLOCK(rtas_lock);
 
 /* this can be called after setup -- Cort */
-int __openfirmware
+int
 call_rtas(const char *service, int nargs, int nret,
          unsigned long *outputs, ...)
 {
index 2cbcad2..47f04c7 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/io.h>
 #include <asm/xparameters.h>
 #include <asm/ibm4xx.h>
+#include <asm/machdep.h>
 
 /* No one else should require these constants, so define them locally here. */
 #define ISR 0                  /* Interrupt Status Register */
index 507d4ee..98612d4 100644 (file)
@@ -478,8 +478,9 @@ void *xmon_stdout;
 void *xmon_stderr;
 
 void
-xmon_init(void)
+xmon_init(int arg)
 {
+       xmon_map_scc();
 }
 
 int
index c658650..8cbac7f 100644 (file)
@@ -10,6 +10,9 @@ config MMU
        bool
        default y
 
+config PPC_STD_MMU
+       def_bool y
+
 config UID16
        bool
 
@@ -186,6 +189,9 @@ config BOOTX_TEXT
          Say Y here to see progress messages from the boot firmware in text
          mode. Requires an Open Firmware compatible video card.
 
+config POWER4
+       def_bool y
+
 config POWER4_ONLY
        bool "Optimize for POWER4"
        default n
@@ -357,7 +363,6 @@ config HOTPLUG_CPU
 
 config PROC_DEVICETREE
        bool "Support for Open Firmware device tree in /proc"
-       depends on !PPC_ISERIES
        help
          This option adds a device-tree directory under /proc which contains
          an image of the device tree that the kernel copies from Open
@@ -461,7 +466,7 @@ config VIOPATH
        depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
        default y
 
-source "arch/ppc64/oprofile/Kconfig"
+source "arch/powerpc/oprofile/Kconfig"
 
 source "arch/ppc64/Kconfig.debug"
 
index 521c2a5..4d18bdb 100644 (file)
@@ -82,10 +82,12 @@ CFLAGS += $(call cc-option,-funit-at-a-time)
 head-y := arch/ppc64/kernel/head.o
 
 libs-y                         += arch/ppc64/lib/
-core-y                         += arch/ppc64/kernel/
-core-y                         += arch/ppc64/mm/
+core-y                         += arch/ppc64/kernel/ arch/powerpc/kernel/
+core-y                         += arch/powerpc/mm/
+core-y                         += arch/powerpc/sysdev/
+core-y                         += arch/powerpc/platforms/
 core-$(CONFIG_XMON)            += arch/ppc64/xmon/
-drivers-$(CONFIG_OPROFILE)     += arch/ppc64/oprofile/
+drivers-$(CONFIG_OPROFILE)     += arch/powerpc/oprofile/
 
 boot := arch/ppc64/boot
 
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644 (file)
index 90032b1..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2001 Mike Corrigan IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvCallEvent.h>
-#include <asm/iSeries/ItLpNaca.h>
-
-/* Array of LpEvent handler functions */
-LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
-unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
-
-/* Register a handler for an LpEvent type */
-
-int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
-{
-       int rc = 1;
-       if ( eventType < HvLpEvent_Type_NumTypes ) {
-               lpEventHandler[eventType] = handler;
-               rc = 0;
-       }
-       return rc;
-       
-}
-
-int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
-{
-       int rc = 1;
-
-       might_sleep();
-
-       if ( eventType < HvLpEvent_Type_NumTypes ) {
-               if ( !lpEventHandlerPaths[eventType] ) {
-                       lpEventHandler[eventType] = NULL;
-                       rc = 0;
-
-                       /* We now sleep until all other CPUs have scheduled. This ensures that
-                        * the deletion is seen by all other CPUs, and that the deleted handler
-                        * isn't still running on another CPU when we return. */
-                       synchronize_rcu();
-               }
-       }
-       return rc;
-}
-EXPORT_SYMBOL(HvLpEvent_registerHandler);
-EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
-
-/* (lpIndex is the partition index of the target partition.  
- * needed only for VirtualIo, VirtualLan and SessionMgr.  Zero
- * indicates to use our partition index - for the other types)
- */
-int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
-{
-       int rc = 1;
-       if ( eventType < HvLpEvent_Type_NumTypes &&
-            lpEventHandler[eventType] ) {
-               if ( lpIndex == 0 )
-                       lpIndex = itLpNaca.xLpIndex;
-               HvCallEvent_openLpEventPath( lpIndex, eventType );
-               ++lpEventHandlerPaths[eventType];
-               rc = 0;
-       }
-       return rc;
-}
-
-int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
-{
-       int rc = 1;
-       if ( eventType < HvLpEvent_Type_NumTypes &&
-            lpEventHandler[eventType] &&
-            lpEventHandlerPaths[eventType] ) {
-               if ( lpIndex == 0 )
-                       lpIndex = itLpNaca.xLpIndex;
-               HvCallEvent_closeLpEventPath( lpIndex, eventType );
-               --lpEventHandlerPaths[eventType];
-               rc = 0;
-       }
-       return rc;
-}
-
index ae60eb1..424dd25 100644 (file)
@@ -2,36 +2,34 @@
 # Makefile for the linux ppc64 kernel.
 #
 
+ifneq ($(CONFIG_PPC_MERGE),y)
+
 EXTRA_CFLAGS   += -mno-minimal-toc
 extra-y                := head.o vmlinux.lds
 
-obj-y               := setup.o entry.o traps.o irq.o idle.o dma.o \
-                       time.o process.o signal.o syscalls.o misc.o ptrace.o \
-                       align.o semaphore.o bitops.o pacaData.o \
-                       udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
-                       ptrace32.o signal32.o rtc.o init_task.o \
-                       lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
-                       iommu.o sysfs.o vdso.o pmc.o firmware.o
-obj-y += vdso32/ vdso64/
+obj-y               := setup.o entry.o misc.o prom.o
 
-obj-$(CONFIG_PPC_OF) +=        of_device.o
+endif
+
+obj-y               += irq.o idle.o dma.o \
+                       signal.o \
+                       align.o bitops.o pacaData.o \
+                       udbg.o ioctl32.o \
+                       rtc.o \
+                       cpu_setup_power4.o \
+                       iommu.o sysfs.o vdso.o firmware.o
+obj-y += vdso32/ vdso64/
 
-pci-obj-$(CONFIG_PPC_ISERIES)  += iSeries_pci.o iSeries_irq.o \
-                               iSeries_VpdInfo.o
 pci-obj-$(CONFIG_PPC_MULTIPLATFORM)    += pci_dn.o pci_direct_iommu.o
 
 obj-$(CONFIG_PCI)      += pci.o pci_iommu.o iomap.o $(pci-obj-y)
 
-obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \
-                            iSeries_setup.o ItLpQueue.o hvCall.o \
-                            mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
-                            iSeries_iommu.o
-
-obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
+obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o
+ifneq ($(CONFIG_PPC_MERGE),y)
+obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
+endif
 
-obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
-                            pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
-                            pSeries_setup.o pSeries_iommu.o udbg_16550.o
+obj-$(CONFIG_PPC_PSERIES) += rtasd.o ras.o udbg_16550.o
 
 obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
                         bpa_iic.o spider-pic.o
@@ -41,25 +39,24 @@ obj-$(CONFIG_EEH)           += eeh.o
 obj-$(CONFIG_PROC_FS)          += proc_ppc64.o
 obj-$(CONFIG_RTAS_FLASH)       += rtas_flash.o
 obj-$(CONFIG_SMP)              += smp.o
-obj-$(CONFIG_MODULES)          += module.o ppc_ksyms.o
+obj-$(CONFIG_MODULES)          += module.o
+ifneq ($(CONFIG_PPC_MERGE),y)
+obj-$(CONFIG_MODULES)          += ppc_ksyms.o
+endif
 obj-$(CONFIG_PPC_RTAS)         += rtas.o rtas_pci.o
 obj-$(CONFIG_RTAS_PROC)                += rtas-proc.o
 obj-$(CONFIG_SCANLOG)          += scanlog.o
-obj-$(CONFIG_VIOPATH)          += viopath.o
 obj-$(CONFIG_LPARCFG)          += lparcfg.o
 obj-$(CONFIG_HVC_CONSOLE)      += hvconsole.o
+ifneq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_BOOTX_TEXT)       += btext.o
+endif
 obj-$(CONFIG_HVCS)             += hvcserver.o
 
-vio-obj-$(CONFIG_PPC_PSERIES)  += pSeries_vio.o
-vio-obj-$(CONFIG_PPC_ISERIES)  += iSeries_vio.o
-obj-$(CONFIG_IBMVIO)           += vio.o $(vio-obj-y)
+obj-$(CONFIG_IBMVIO)           += vio.o
 obj-$(CONFIG_XICS)             += xics.o
-obj-$(CONFIG_MPIC)             += mpic.o
 
-obj-$(CONFIG_PPC_PMAC)         += pmac_setup.o pmac_feature.o pmac_pci.o \
-                                  pmac_time.o pmac_nvram.o pmac_low_i2c.o \
-                                  udbg_scc.o
+obj-$(CONFIG_PPC_PMAC)         += udbg_scc.o
 
 obj-$(CONFIG_PPC_MAPLE)                += maple_setup.o maple_pci.o maple_time.o \
                                   udbg_16550.o
@@ -67,19 +64,17 @@ obj-$(CONFIG_PPC_MAPLE)             += maple_setup.o maple_pci.o maple_time.o \
 obj-$(CONFIG_U3_DART)          += u3_iommu.o
 
 ifdef CONFIG_SMP
-obj-$(CONFIG_PPC_PMAC)         += pmac_smp.o smp-tbsync.o
-obj-$(CONFIG_PPC_ISERIES)      += iSeries_smp.o
-obj-$(CONFIG_PPC_PSERIES)      += pSeries_smp.o
-obj-$(CONFIG_PPC_BPA)          += pSeries_smp.o
+obj-$(CONFIG_PPC_PMAC)         += smp-tbsync.o
 obj-$(CONFIG_PPC_MAPLE)                += smp-tbsync.o
 endif
 
-obj-$(CONFIG_ALTIVEC)          += vecemu.o vector.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
 
 CFLAGS_ioctl32.o += -Ifs/
 
+ifneq ($(CONFIG_PPC_MERGE),y)
 ifeq ($(CONFIG_PPC_ISERIES),y)
-arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s
-AFLAGS_head.o += -Iarch/ppc64/kernel
+arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
+AFLAGS_head.o += -Iarch/powerpc/kernel
+endif
 endif
index 1ff4fa0..5e6046c 100644 (file)
@@ -46,8 +46,6 @@
 int main(void)
 {
        /* thread struct on stack */
-       DEFINE(THREAD_SHIFT, THREAD_SHIFT);
-       DEFINE(THREAD_SIZE, THREAD_SIZE);
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
        DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
        DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
@@ -77,6 +75,7 @@ int main(void)
        DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
        DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
        DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
+       DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
 
        /* paca */
         DEFINE(PACA_SIZE, sizeof(struct paca_struct));
index 5f24600..da1b4b7 100644 (file)
@@ -39,8 +39,8 @@
 #include <asm/pmac_feature.h>
 #include <asm/abs_addr.h>
 #include <asm/system.h>
+#include <asm/ppc-pci.h>
 
-#include "pci.h"
 #include "bpa_iommu.h"
 
 static inline unsigned long 
index 57b3db6..c2dc8f2 100644 (file)
@@ -43,8 +43,9 @@
 #include <asm/time.h>
 #include <asm/nvram.h>
 #include <asm/cputable.h>
+#include <asm/ppc-pci.h>
+#include <asm/irq.h>
 
-#include "pci.h"
 #include "bpa_iic.h"
 #include "bpa_iommu.h"
 
@@ -54,7 +55,7 @@
 #define DBG(fmt...)
 #endif
 
-void bpa_get_cpuinfo(struct seq_file *m)
+void bpa_show_cpuinfo(struct seq_file *m)
 {
        struct device_node *root;
        const char *model = "";
@@ -128,7 +129,7 @@ struct machdep_calls __initdata bpa_md = {
        .probe                  = bpa_probe,
        .setup_arch             = bpa_setup_arch,
        .init_early             = bpa_init_early,
-       .get_cpuinfo            = bpa_get_cpuinfo,
+       .show_cpuinfo           = bpa_show_cpuinfo,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
index b6fbfbe..506a378 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/io.h>
 #include <asm/lmb.h>
 #include <asm/processor.h>
+#include <asm/udbg.h>
 
 #undef NO_SCROLL
 
@@ -131,6 +132,47 @@ int btext_initialize(struct device_node *np)
        return 0;
 }
 
+static void btext_putc(unsigned char c)
+{
+       btext_drawchar(c);
+}
+
+void __init init_boot_display(void)
+{
+       char *name;
+       struct device_node *np = NULL; 
+       int rc = -ENODEV;
+
+       printk("trying to initialize btext ...\n");
+
+       name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
+       if (name != NULL) {
+               np = of_find_node_by_path(name);
+               if (np != NULL) {
+                       if (strcmp(np->type, "display") != 0) {
+                               printk("boot stdout isn't a display !\n");
+                               of_node_put(np);
+                               np = NULL;
+                       }
+               }
+       }
+       if (np)
+               rc = btext_initialize(np);
+       if (rc) {
+               for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
+                       if (get_property(np, "linux,opened", NULL)) {
+                               printk("trying %s ...\n", np->full_name);
+                               rc = btext_initialize(np);
+                               printk("result: %d\n", rc);
+                       }
+                       if (rc == 0)
+                               break;
+               }
+       }
+       if (rc == 0 && udbg_putc == NULL)
+               udbg_putc = btext_putc;
+}
+
 
 /* Calc the base address of a given point (x,y) */
 static unsigned char * calc_base(int x, int y)
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
deleted file mode 100644 (file)
index 8831a28..0000000
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- *  arch/ppc64/kernel/cputable.c
- *
- *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  Modifications for ppc64:
- *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/oprofile_impl.h>
-#include <asm/cputable.h>
-
-struct cpu_spec* cur_cpu_spec = NULL;
-EXPORT_SYMBOL(cur_cpu_spec);
-
-/* NOTE:
- * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
- * the responsibility of the appropriate CPU save/restore functions to
- * eventually copy these settings over. Those save/restore aren't yet
- * part of the cputable though. That has to be fixed for both ppc32
- * and ppc64
- */
-extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
-
-
-/* We only set the altivec features if the kernel was compiled with altivec
- * support
- */
-#ifdef CONFIG_ALTIVEC
-#define CPU_FTR_ALTIVEC_COMP   CPU_FTR_ALTIVEC
-#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
-#else
-#define CPU_FTR_ALTIVEC_COMP   0
-#define PPC_FEATURE_HAS_ALTIVEC_COMP    0
-#endif
-
-struct cpu_spec        cpu_specs[] = {
-       {       /* Power3 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00400000,
-               .cpu_name               = "POWER3 (630)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
-               .cpu_user_features = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/power3",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* Power3+ */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00410000,
-               .cpu_name               = "POWER3 (630+)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/power3",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* Northstar */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00330000,
-               .cpu_name               = "RS64-II (northstar)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-                       CPU_FTR_MMCRA | CPU_FTR_CTRL,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* Pulsar */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00340000,
-               .cpu_name               = "RS64-III (pulsar)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-                       CPU_FTR_MMCRA | CPU_FTR_CTRL,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* I-star */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00360000,
-               .cpu_name               = "RS64-III (icestar)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-                       CPU_FTR_MMCRA | CPU_FTR_CTRL,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* S-star */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00370000,
-               .cpu_name               = "RS64-IV (sstar)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-                       CPU_FTR_MMCRA | CPU_FTR_CTRL,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* Power4 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00350000,
-               .cpu_name               = "POWER4 (gp)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/power4",
-               .oprofile_model         = &op_model_rs64,
-#endif
-       },
-       {       /* Power4+ */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00380000,
-               .cpu_name               = "POWER4+ (gq)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/power4",
-               .oprofile_model         = &op_model_power4,
-#endif
-       },
-       {       /* PPC970 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00390000,
-               .cpu_name               = "PPC970",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-                       CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
-               .cpu_user_features      = COMMON_USER_PPC64 |
-                       PPC_FEATURE_HAS_ALTIVEC_COMP,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_ppc970,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/970",
-               .oprofile_model         = &op_model_power4,
-#endif
-       },
-       {       /* PPC970FX */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x003c0000,
-               .cpu_name               = "PPC970FX",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-                       CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
-               .cpu_user_features      = COMMON_USER_PPC64 |
-                       PPC_FEATURE_HAS_ALTIVEC_COMP,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_ppc970,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/970",
-               .oprofile_model         = &op_model_power4,
-#endif
-       },
-       {       /* PPC970MP */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00440000,
-               .cpu_name               = "PPC970MP",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-                       CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
-               .cpu_user_features      = COMMON_USER_PPC64 |
-                       PPC_FEATURE_HAS_ALTIVEC_COMP,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .cpu_setup              = __setup_cpu_ppc970,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/970",
-               .oprofile_model         = &op_model_power4,
-#endif
-       },
-       {       /* Power5 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x003a0000,
-               .cpu_name               = "POWER5 (gr)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
-                       CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-                       CPU_FTR_MMCRA_SIHV,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 6,
-               .cpu_setup              = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/power5",
-               .oprofile_model         = &op_model_power4,
-#endif
-       },
-       {       /* Power5 */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x003b0000,
-               .cpu_name               = "POWER5 (gs)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
-                       CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-                       CPU_FTR_MMCRA_SIHV,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 6,
-               .cpu_setup              = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
-               .oprofile_cpu_type      = "ppc64/power5",
-               .oprofile_model         = &op_model_power4,
-#endif
-       },
-       {       /* BE DD1.x */
-               .pvr_mask               = 0xffff0000,
-               .pvr_value              = 0x00700000,
-               .cpu_name               = "Broadband Engine",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-                       CPU_FTR_SMT,
-               .cpu_user_features      = COMMON_USER_PPC64 |
-                       PPC_FEATURE_HAS_ALTIVEC_COMP,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .cpu_setup              = __setup_cpu_be,
-       },
-       {       /* default match */
-               .pvr_mask               = 0x00000000,
-               .pvr_value              = 0x00000000,
-               .cpu_name               = "POWER4 (compatible)",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-                       CPU_FTR_PPCAS_ARCH_V2,
-               .cpu_user_features      = COMMON_USER_PPC64,
-               .icache_bsize           = 128,
-               .dcache_bsize           = 128,
-               .num_pmcs               = 6,
-               .cpu_setup              = __setup_cpu_power4,
-       }
-};
index ba93fd7..035d1b1 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/rtas.h>
 #include <asm/atomic.h>
 #include <asm/systemcfg.h>
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #undef DEBUG
 
index e8c0bbf..5d2fcbe 100644 (file)
@@ -191,8 +191,8 @@ syscall_exit_trace_cont:
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
-       mtspr   SRR0,r7
-       mtspr   SRR1,r8
+       mtspr   SPRN_SRR0,r7
+       mtspr   SPRN_SRR1,r8
        rfid
        b       .       /* prevent speculative execution */
 
@@ -265,7 +265,7 @@ _GLOBAL(save_nvgprs)
  */
 _GLOBAL(ppc32_sigsuspend)
        bl      .save_nvgprs
-       bl      .sys32_sigsuspend
+       bl      .compat_sys_sigsuspend
        b       70f
 
 _GLOBAL(ppc64_rt_sigsuspend)
@@ -275,7 +275,7 @@ _GLOBAL(ppc64_rt_sigsuspend)
 
 _GLOBAL(ppc32_rt_sigsuspend)
        bl      .save_nvgprs
-       bl      .sys32_rt_sigsuspend
+       bl      .compat_sys_rt_sigsuspend
 70:    cmpdi   0,r3,0
        /* If it returned an error, we need to return via syscall_exit to set
           the SO bit in cr0 and potentially stop for ptrace. */
@@ -310,7 +310,7 @@ _GLOBAL(ppc_clone)
 
 _GLOBAL(ppc32_swapcontext)
        bl      .save_nvgprs
-       bl      .sys32_swapcontext
+       bl      .compat_sys_swapcontext
        b       80f
        
 _GLOBAL(ppc64_swapcontext)
@@ -319,11 +319,11 @@ _GLOBAL(ppc64_swapcontext)
        b       80f
 
 _GLOBAL(ppc32_sigreturn)
-       bl      .sys32_sigreturn
+       bl      .compat_sys_sigreturn
        b       80f
 
 _GLOBAL(ppc32_rt_sigreturn)
-       bl      .sys32_rt_sigreturn
+       bl      .compat_sys_rt_sigreturn
        b       80f
 
 _GLOBAL(ppc64_rt_sigreturn)
@@ -531,7 +531,7 @@ restore:
        mtctr   r3
        mtlr    r0
        ld      r3,_XER(r1)
-       mtspr   XER,r3
+       mtspr   SPRN_XER,r3
 
        REST_8GPRS(5, r1)
 
@@ -543,12 +543,12 @@ restore:
        mtmsrd  r0,1
 
        ld      r0,_MSR(r1)
-       mtspr   SRR1,r0
+       mtspr   SPRN_SRR1,r0
 
        ld      r2,_CCR(r1)
        mtcrf   0xFF,r2
        ld      r2,_NIP(r1)
-       mtspr   SRR0,r2
+       mtspr   SPRN_SRR0,r2
 
        ld      r0,GPR0(r1)
        ld      r2,GPR2(r1)
@@ -643,7 +643,7 @@ _GLOBAL(enter_rtas)
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
-       mfspr   r6,XER
+       mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
@@ -697,14 +697,14 @@ _GLOBAL(enter_rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */
        
-       mtspr   SRR0,r5
-       mtspr   SRR1,r6
+       mtspr   SPRN_SRR0,r5
+       mtspr   SPRN_SRR1,r6
        rfid
        b       .       /* prevent speculative execution */
 
 _STATIC(rtas_return_loc)
        /* relocation is off at this point */
-       mfspr   r4,SPRG3                /* Get PACA */
+       mfspr   r4,SPRN_SPRG3           /* Get PACA */
        SET_REG_TO_CONST(r5, KERNELBASE)
         sub     r4,r4,r5                /* RELOC the PACA base pointer */
 
@@ -718,8 +718,8 @@ _STATIC(rtas_return_loc)
        LOADADDR(r3,.rtas_restore_regs)
         ld     r4,PACASAVEDMSR(r4)     /* Restore our MSR */
 
-       mtspr   SRR0,r3
-       mtspr   SRR1,r4
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */
 
@@ -730,14 +730,14 @@ _STATIC(rtas_restore_regs)
        REST_8GPRS(14, r1)              /* Restore the non-volatiles */
        REST_10GPRS(22, r1)             /* ditto */
 
-       mfspr   r13,SPRG3
+       mfspr   r13,SPRN_SPRG3
 
        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
-       mtspr   XER,r6
+       mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
@@ -774,7 +774,7 @@ _GLOBAL(enter_prom)
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
-       mfspr   r6,XER
+       mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
@@ -827,7 +827,7 @@ _GLOBAL(enter_prom)
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
-       mtspr   XER,r6
+       mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
index 72c6104..f58af9c 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/setup.h>
 #include <asm/hvcall.h>
 #include <asm/iSeries/LparMap.h>
+#include <asm/thread_info.h>
 
 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
@@ -201,22 +202,22 @@ exception_marker:
 #define EX_CCR         60
 
 #define EXCEPTION_PROLOG_PSERIES(area, label)                          \
-       mfspr   r13,SPRG3;              /* get paca address into r13 */ \
+       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
        std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
        std     r10,area+EX_R10(r13);                                   \
        std     r11,area+EX_R11(r13);                                   \
        std     r12,area+EX_R12(r13);                                   \
-       mfspr   r9,SPRG1;                                               \
+       mfspr   r9,SPRN_SPRG1;                                          \
        std     r9,area+EX_R13(r13);                                    \
        mfcr    r9;                                                     \
        clrrdi  r12,r13,32;             /* get high part of &label */   \
        mfmsr   r10;                                                    \
-       mfspr   r11,SRR0;               /* save SRR0 */                 \
+       mfspr   r11,SPRN_SRR0;          /* save SRR0 */                 \
        ori     r12,r12,(label)@l;      /* virt addr of handler */      \
        ori     r10,r10,MSR_IR|MSR_DR|MSR_RI;                           \
-       mtspr   SRR0,r12;                                               \
-       mfspr   r12,SRR1;               /* and SRR1 */                  \
-       mtspr   SRR1,r10;                                               \
+       mtspr   SPRN_SRR0,r12;                                          \
+       mfspr   r12,SPRN_SRR1;          /* and SRR1 */                  \
+       mtspr   SPRN_SRR1,r10;                                          \
        rfid;                                                           \
        b       .       /* prevent speculative execution */
 
@@ -225,12 +226,12 @@ exception_marker:
  * This code runs with relocation on.
  */
 #define EXCEPTION_PROLOG_ISERIES_1(area)                               \
-       mfspr   r13,SPRG3;              /* get paca address into r13 */ \
+       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
        std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
        std     r10,area+EX_R10(r13);                                   \
        std     r11,area+EX_R11(r13);                                   \
        std     r12,area+EX_R12(r13);                                   \
-       mfspr   r9,SPRG1;                                               \
+       mfspr   r9,SPRN_SPRG1;                                          \
        std     r9,area+EX_R13(r13);                                    \
        mfcr    r9
 
@@ -283,7 +284,7 @@ exception_marker:
        std     r9,_LINK(r1);                                              \
        mfctr   r10;                    /* save CTR in stackframe       */ \
        std     r10,_CTR(r1);                                              \
-       mfspr   r11,XER;                /* save XER in stackframe       */ \
+       mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
        std     r11,_XER(r1);                                              \
        li      r9,(n)+1;                                                  \
        std     r9,_TRAP(r1);           /* set trap number              */ \
@@ -300,7 +301,7 @@ exception_marker:
        .globl label##_pSeries;                         \
 label##_pSeries:                                       \
        HMT_MEDIUM;                                     \
-       mtspr   SPRG1,r13;              /* save r13 */  \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */  \
        RUNLATCH_ON(r13);                               \
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
@@ -308,7 +309,7 @@ label##_pSeries:                                    \
        .globl label##_iSeries;                         \
 label##_iSeries:                                       \
        HMT_MEDIUM;                                     \
-       mtspr   SPRG1,r13;              /* save r13 */  \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */  \
        RUNLATCH_ON(r13);                               \
        EXCEPTION_PROLOG_ISERIES_1(area);               \
        EXCEPTION_PROLOG_ISERIES_2;                     \
@@ -318,7 +319,7 @@ label##_iSeries:                                    \
        .globl label##_iSeries;                                         \
 label##_iSeries:                                                       \
        HMT_MEDIUM;                                                     \
-       mtspr   SPRG1,r13;              /* save r13 */                  \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */                  \
        RUNLATCH_ON(r13);                                               \
        EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);                         \
        lbz     r10,PACAPROCENABLED(r13);                               \
@@ -388,7 +389,7 @@ __start_interrupts:
        . = 0x200
 _machine_check_pSeries:
        HMT_MEDIUM
-       mtspr   SPRG1,r13               /* save r13 */
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
        RUNLATCH_ON(r13)
        EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
@@ -396,18 +397,18 @@ _machine_check_pSeries:
        .globl data_access_pSeries
 data_access_pSeries:
        HMT_MEDIUM
-       mtspr   SPRG1,r13
+       mtspr   SPRN_SPRG1,r13
 BEGIN_FTR_SECTION
-       mtspr   SPRG2,r12
-       mfspr   r13,DAR
-       mfspr   r12,DSISR
+       mtspr   SPRN_SPRG2,r12
+       mfspr   r13,SPRN_DAR
+       mfspr   r12,SPRN_DSISR
        srdi    r13,r13,60
        rlwimi  r13,r12,16,0x20
        mfcr    r12
        cmpwi   r13,0x2c
        beq     .do_stab_bolted_pSeries
        mtcrf   0x80,r12
-       mfspr   r12,SPRG2
+       mfspr   r12,SPRN_SPRG2
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
 
@@ -415,19 +416,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        .globl data_access_slb_pSeries
 data_access_slb_pSeries:
        HMT_MEDIUM
-       mtspr   SPRG1,r13
+       mtspr   SPRN_SPRG1,r13
        RUNLATCH_ON(r13)
-       mfspr   r13,SPRG3               /* get paca address into r13 */
+       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
        std     r10,PACA_EXSLB+EX_R10(r13)
        std     r11,PACA_EXSLB+EX_R11(r13)
        std     r12,PACA_EXSLB+EX_R12(r13)
        std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r9,SPRG1
+       mfspr   r9,SPRN_SPRG1
        std     r9,PACA_EXSLB+EX_R13(r13)
        mfcr    r9
-       mfspr   r12,SRR1                /* and SRR1 */
-       mfspr   r3,DAR
+       mfspr   r12,SPRN_SRR1           /* and SRR1 */
+       mfspr   r3,SPRN_DAR
        b       .do_slb_miss            /* Rel. branch works in real mode */
 
        STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -436,19 +437,19 @@ data_access_slb_pSeries:
        .globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
        HMT_MEDIUM
-       mtspr   SPRG1,r13
+       mtspr   SPRN_SPRG1,r13
        RUNLATCH_ON(r13)
-       mfspr   r13,SPRG3               /* get paca address into r13 */
+       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
        std     r10,PACA_EXSLB+EX_R10(r13)
        std     r11,PACA_EXSLB+EX_R11(r13)
        std     r12,PACA_EXSLB+EX_R12(r13)
        std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r9,SPRG1
+       mfspr   r9,SPRN_SPRG1
        std     r9,PACA_EXSLB+EX_R13(r13)
        mfcr    r9
-       mfspr   r12,SRR1                /* and SRR1 */
-       mfspr   r3,SRR0                 /* SRR0 is faulting address */
+       mfspr   r12,SPRN_SRR1           /* and SRR1 */
+       mfspr   r3,SPRN_SRR0                    /* SRR0 is faulting address */
        b       .do_slb_miss            /* Rel. branch works in real mode */
 
        STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
@@ -466,15 +467,15 @@ system_call_pSeries:
        RUNLATCH_ON(r9)
        mr      r9,r13
        mfmsr   r10
-       mfspr   r13,SPRG3
-       mfspr   r11,SRR0
+       mfspr   r13,SPRN_SPRG3
+       mfspr   r11,SPRN_SRR0
        clrrdi  r12,r13,32
        oris    r12,r12,system_call_common@h
        ori     r12,r12,system_call_common@l
-       mtspr   SRR0,r12
+       mtspr   SPRN_SRR0,r12
        ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
-       mfspr   r12,SRR1
-       mtspr   SRR1,r10
+       mfspr   r12,SPRN_SRR1
+       mtspr   SPRN_SRR1,r10
        rfid
        b       .       /* prevent speculative execution */
 
@@ -504,25 +505,25 @@ system_call_pSeries:
        .align  7
 _GLOBAL(do_stab_bolted_pSeries)
        mtcrf   0x80,r12
-       mfspr   r12,SPRG2
+       mfspr   r12,SPRN_SPRG2
        EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
 /*
  * Vectors for the FWNMI option.  Share common code.
  */
-      .globl system_reset_fwnmi
+       .globl system_reset_fwnmi
 system_reset_fwnmi:
-      HMT_MEDIUM
-      mtspr   SPRG1,r13               /* save r13 */
-      RUNLATCH_ON(r13)
-      EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
 
-      .globl machine_check_fwnmi
+       .globl machine_check_fwnmi
 machine_check_fwnmi:
-      HMT_MEDIUM
-      mtspr   SPRG1,r13               /* save r13 */
-      RUNLATCH_ON(r13)
-      EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+       HMT_MEDIUM
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
+       RUNLATCH_ON(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
 #ifdef CONFIG_PPC_ISERIES
 /***  ISeries-LPAR interrupt handlers ***/
@@ -531,18 +532,18 @@ machine_check_fwnmi:
 
        .globl data_access_iSeries
 data_access_iSeries:
-       mtspr   SPRG1,r13
+       mtspr   SPRN_SPRG1,r13
 BEGIN_FTR_SECTION
-       mtspr   SPRG2,r12
-       mfspr   r13,DAR
-       mfspr   r12,DSISR
+       mtspr   SPRN_SPRG2,r12
+       mfspr   r13,SPRN_DAR
+       mfspr   r12,SPRN_DSISR
        srdi    r13,r13,60
        rlwimi  r13,r12,16,0x20
        mfcr    r12
        cmpwi   r13,0x2c
        beq     .do_stab_bolted_iSeries
        mtcrf   0x80,r12
-       mfspr   r12,SPRG2
+       mfspr   r12,SPRN_SPRG2
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
        EXCEPTION_PROLOG_ISERIES_2
@@ -550,25 +551,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
 .do_stab_bolted_iSeries:
        mtcrf   0x80,r12
-       mfspr   r12,SPRG2
+       mfspr   r12,SPRN_SPRG2
        EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
        EXCEPTION_PROLOG_ISERIES_2
        b       .do_stab_bolted
 
        .globl  data_access_slb_iSeries
 data_access_slb_iSeries:
-       mtspr   SPRG1,r13               /* save r13 */
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
        std     r3,PACA_EXSLB+EX_R3(r13)
        ld      r12,PACALPPACA+LPPACASRR1(r13)
-       mfspr   r3,DAR
+       mfspr   r3,SPRN_DAR
        b       .do_slb_miss
 
        STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
 
        .globl  instruction_access_slb_iSeries
 instruction_access_slb_iSeries:
-       mtspr   SPRG1,r13               /* save r13 */
+       mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
        std     r3,PACA_EXSLB+EX_R3(r13)
        ld      r12,PACALPPACA+LPPACASRR1(r13)
@@ -586,7 +587,7 @@ instruction_access_slb_iSeries:
        .globl  system_call_iSeries
 system_call_iSeries:
        mr      r9,r13
-       mfspr   r13,SPRG3
+       mfspr   r13,SPRN_SPRG3
        EXCEPTION_PROLOG_ISERIES_2
        b       system_call_common
 
@@ -596,7 +597,7 @@ system_call_iSeries:
 
        .globl system_reset_iSeries
 system_reset_iSeries:
-       mfspr   r13,SPRG3               /* Get paca address */
+       mfspr   r13,SPRN_SPRG3          /* Get paca address */
        mfmsr   r24
        ori     r24,r24,MSR_RI
        mtmsrd  r24                     /* RI on */
@@ -639,7 +640,7 @@ iSeries_secondary_smp_loop:
 #endif /* CONFIG_SMP */
        li      r0,-1                   /* r0=-1 indicates a Hypervisor call */
        sc                              /* Invoke the hypervisor via a system call */
-       mfspr   r13,SPRG3               /* Put r13 back ???? */
+       mfspr   r13,SPRN_SPRG3          /* Put r13 back ???? */
        b       1b                      /* If SMP not configured, secondaries
                                         * loop forever */
 
@@ -656,8 +657,8 @@ hardware_interrupt_iSeries_masked:
        mtcrf   0x80,r9         /* Restore regs */
        ld      r11,PACALPPACA+LPPACASRR0(r13)
        ld      r12,PACALPPACA+LPPACASRR1(r13)
-       mtspr   SRR0,r11
-       mtspr   SRR1,r12
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
        ld      r9,PACA_EXGEN+EX_R9(r13)
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
@@ -713,8 +714,8 @@ bad_stack:
        std     r10,GPR1(r1)
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
-       mfspr   r11,DAR
-       mfspr   r12,DSISR
+       mfspr   r11,SPRN_DAR
+       mfspr   r12,SPRN_DSISR
        std     r11,_DAR(r1)
        std     r12,_DSISR(r1)
        mflr    r10
@@ -766,8 +767,8 @@ fast_exception_return:
        clrrdi  r10,r10,2               /* clear RI (LE is 0 already) */
        mtmsrd  r10,1
 
-       mtspr   SRR1,r12
-       mtspr   SRR0,r11
+       mtspr   SPRN_SRR1,r12
+       mtspr   SPRN_SRR0,r11
        REST_4GPRS(10, r1)
        ld      r1,GPR1(r1)
        rfid
@@ -788,9 +789,9 @@ unrecov_fer:
        .globl data_access_common
 data_access_common:
        RUNLATCH_ON(r10)                /* It wont fit in the 0x300 handler */
-       mfspr   r10,DAR
+       mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
-       mfspr   r10,DSISR
+       mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
@@ -821,9 +822,9 @@ hardware_interrupt_entry:
        .align  7
        .globl alignment_common
 alignment_common:
-       mfspr   r10,DAR
+       mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
-       mfspr   r10,DSISR
+       mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
@@ -1120,7 +1121,7 @@ _GLOBAL(do_stab_bolted)
 
        /* Hash to the primary group */
        ld      r10,PACASTABVIRT(r13)
-       mfspr   r11,DAR
+       mfspr   r11,SPRN_DAR
        srdi    r11,r11,28
        rldimi  r10,r11,7,52    /* r10 = first ste of the group */
 
@@ -1162,7 +1163,7 @@ _GLOBAL(do_stab_bolted)
 2:     std     r9,8(r10)       /* Store the vsid part of the ste       */
        eieio
 
-       mfspr   r11,DAR         /* Get the new esid                     */
+       mfspr   r11,SPRN_DAR            /* Get the new esid                     */
        clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
        ori     r11,r11,0x90    /* Turn on valid and kp                 */
        std     r11,0(r10)      /* Put new entry back into the stab     */
@@ -1182,8 +1183,8 @@ _GLOBAL(do_stab_bolted)
        clrrdi  r10,r10,2
        mtmsrd  r10,1
 
-       mtspr   SRR0,r11
-       mtspr   SRR1,r12
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
        ld      r11,PACA_EXSLB+EX_R11(r13)
@@ -1229,8 +1230,8 @@ _GLOBAL(do_slb_miss)
 .machine       pop
 
 #ifdef CONFIG_PPC_ISERIES
-       mtspr   SRR0,r11
-       mtspr   SRR1,r12
+       mtspr   SPRN_SRR0,r11
+       mtspr   SPRN_SRR1,r12
 #endif /* CONFIG_PPC_ISERIES */
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
@@ -1253,7 +1254,7 @@ unrecov_slb:
  *
  * On iSeries, the hypervisor must fill in at least one entry before
  * we get control (with relocate on).  The address is give to the hv
- * as a page number (see xLparMap in LparData.c), so this must be at a
+ * as a page number (see xLparMap in lpardata.c), so this must be at a
  * fixed address (the linker can't compute (u64)&initial_stab >>
  * PAGE_SHIFT).
  */
@@ -1316,7 +1317,7 @@ _GLOBAL(pSeries_secondary_smp_init)
        mr      r3,r24                  /* not found, copy phys to r3    */
        b       .kexec_wait             /* next kernel might do better   */
 
-2:     mtspr   SPRG3,r13               /* Save vaddr of paca in SPRG3   */
+2:     mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3   */
        /* From now on, r24 is expected to be logical cpuid */
        mr      r24,r5
 3:     HMT_LOW
@@ -1364,6 +1365,7 @@ _STATIC(__start_initialization_iSeries)
        addi    r2,r2,0x4000
 
        bl      .iSeries_early_setup
+       bl      .early_setup
 
        /* relocation is on at this point */
 
@@ -1554,20 +1556,17 @@ copy_to_here:
        .section ".text";
        .align 2 ;
 
-       .globl  pmac_secondary_start_1  
-pmac_secondary_start_1:        
-       li      r24, 1
-       b       .pmac_secondary_start
-       
-       .globl pmac_secondary_start_2
-pmac_secondary_start_2:        
-       li      r24, 2
-       b       .pmac_secondary_start
-       
-       .globl pmac_secondary_start_3
-pmac_secondary_start_3:
-       li      r24, 3
-       b       .pmac_secondary_start
+       .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+       li      r24,0
+       b       1f
+       li      r24,1
+       b       1f
+       li      r24,2
+       b       1f
+       li      r24,3
+1:
        
 _GLOBAL(pmac_secondary_start)
        /* turn on 64-bit mode */
@@ -1586,7 +1585,7 @@ _GLOBAL(pmac_secondary_start)
        LOADADDR(r4, paca)               /* Get base vaddr of paca array        */
        mulli   r13,r24,PACA_SIZE        /* Calculate vaddr of right paca */
        add     r13,r13,r4              /* for this processor.          */
-       mtspr   SPRG3,r13                /* Save vaddr of paca in SPRG3 */
+       mtspr   SPRN_SPRG3,r13           /* Save vaddr of paca in SPRG3 */
 
        /* Create a temp kernel stack for use before relocation is on.  */
        ld      r1,PACAEMERGSP(r13)
@@ -1621,7 +1620,7 @@ _GLOBAL(__secondary_start)
        /* Initialize the page table pointer register. */
        LOADADDR(r6,_SDR1)
        ld      r6,0(r6)                /* get the value of _SDR1        */
-       mtspr   SDR1,r6                 /* set the htab location         */
+       mtspr   SPRN_SDR1,r6                    /* set the htab location         */
 #endif
        /* Initialize the first segment table (or SLB) entry             */
        ld      r3,PACASTABVIRT(r13)    /* get addr of segment table     */
@@ -1650,7 +1649,7 @@ _GLOBAL(__secondary_start)
        lwz     r3,PLATFORM(r3)         /* r3 = platform flags           */
        andi.   r3,r3,PLATFORM_LPAR     /* Test if bit 0 is set (LPAR bit) */
        beq     98f                     /* branch if result is 0  */
-       mfspr   r3,PVR
+       mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmpwi   r3,0x37                 /* SStar  */
        beq     97f
@@ -1674,8 +1673,8 @@ _GLOBAL(__secondary_start)
 #ifdef DO_SOFT_DISABLE
        ori     r4,r4,MSR_EE
 #endif
-       mtspr   SRR0,r3
-       mtspr   SRR1,r4
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */
 
@@ -1737,7 +1736,7 @@ _STATIC(start_here_multiplatform)
 
 #ifdef CONFIG_HMT
        /* Start up the second thread on cpu 0 */
-       mfspr   r3,PVR
+       mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmpwi   r3,0x34                 /* Pulsar  */
        beq     90f
@@ -1797,7 +1796,7 @@ _STATIC(start_here_multiplatform)
        mulli   r13,r27,PACA_SIZE       /* Calculate vaddr of right paca */
        add     r13,r13,r24             /* for this processor.           */
        sub     r13,r13,r26             /* convert to physical addr      */
-       mtspr   SPRG3,r13               /* PPPBBB: Temp... -Peter */
+       mtspr   SPRN_SPRG3,r13          /* PPPBBB: Temp... -Peter */
        
        /* Do very early kernel initializations, including initial hash table,
         * stab and slb setup before we turn on relocation.     */
@@ -1814,7 +1813,7 @@ _STATIC(start_here_multiplatform)
        lwz     r3,PLATFORM(r3)         /* r3 = platform flags */
        andi.   r3,r3,PLATFORM_LPAR     /* Test if bit 0 is set (LPAR bit) */
        beq     98f                     /* branch if result is 0  */
-       mfspr   r3,PVR
+       mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmpwi   r3,0x37                 /* SStar */
        beq     97f
@@ -1838,12 +1837,12 @@ _STATIC(start_here_multiplatform)
        LOADADDR(r6,_SDR1)              /* Only if NOT LPAR */
        sub     r6,r6,r26
        ld      r6,0(r6)                /* get the value of _SDR1 */
-       mtspr   SDR1,r6                 /* set the htab location  */
+       mtspr   SPRN_SDR1,r6                    /* set the htab location  */
 98: 
        LOADADDR(r3,.start_here_common)
        SET_REG_TO_CONST(r4, MSR_KERNEL)
-       mtspr   SRR0,r3
-       mtspr   SRR1,r4
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */
 #endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -1874,7 +1873,7 @@ _STATIC(start_here_common)
        LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
        mulli   r13,r26,PACA_SIZE       /* Calculate vaddr of right paca */
        add     r13,r13,r24             /* for this processor.           */
-       mtspr   SPRG3,r13
+       mtspr   SPRN_SPRG3,r13
 
        /* ptr to current */
        LOADADDR(r4,init_task)
@@ -1901,7 +1900,7 @@ _STATIC(start_here_common)
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
        LOADADDR(r5, hmt_thread_data)
-       mfspr   r7,PVR
+       mfspr   r7,SPRN_PVR
        srwi    r7,r7,16
        cmpwi   r7,0x34                 /* Pulsar  */
        beq     90f
@@ -1910,10 +1909,10 @@ _GLOBAL(hmt_init)
        cmpwi   r7,0x37                 /* SStar   */
        beq     91f
        b       101f
-90:    mfspr   r6,PIR
+90:    mfspr   r6,SPRN_PIR
        andi.   r6,r6,0x1f
        b       92f
-91:    mfspr   r6,PIR
+91:    mfspr   r6,SPRN_PIR
        andi.   r6,r6,0x3ff
 92:    sldi    r4,r24,3
        stwx    r6,r5,r4
@@ -1924,8 +1923,8 @@ __hmt_secondary_hold:
        LOADADDR(r5, hmt_thread_data)
        clrldi  r5,r5,4
        li      r7,0
-       mfspr   r6,PIR
-       mfspr   r8,PVR
+       mfspr   r6,SPRN_PIR
+       mfspr   r8,SPRN_PVR
        srwi    r8,r8,16
        cmpwi   r8,0x34
        bne     93f
@@ -1951,39 +1950,41 @@ __hmt_secondary_hold:
 _GLOBAL(hmt_start_secondary)
        LOADADDR(r4,__hmt_secondary_hold)
        clrldi  r4,r4,4
-       mtspr   NIADORM, r4
-       mfspr   r4, MSRDORM
+       mtspr   SPRN_NIADORM, r4
+       mfspr   r4, SPRN_MSRDORM
        li      r5, -65
        and     r4, r4, r5
-       mtspr   MSRDORM, r4
+       mtspr   SPRN_MSRDORM, r4
        lis     r4,0xffef
        ori     r4,r4,0x7403
-       mtspr   TSC, r4
+       mtspr   SPRN_TSC, r4
        li      r4,0x1f4
-       mtspr   TST, r4
-       mfspr   r4, HID0
+       mtspr   SPRN_TST, r4
+       mfspr   r4, SPRN_HID0
        ori     r4, r4, 0x1
-       mtspr   HID0, r4
+       mtspr   SPRN_HID0, r4
        mfspr   r4, SPRN_CTRLF
        oris    r4, r4, 0x40
        mtspr   SPRN_CTRLT, r4
        blr
 #endif
 
-#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
+#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
 _GLOBAL(smp_release_cpus)
        /* All secondary cpus are spinning on a common
         * spinloop, release them all now so they can start
         * to spin on their individual paca spinloops.
         * For non SMP kernels, the secondary cpus never
         * get out of the common spinloop.
+        * XXX This does nothing useful on iSeries, secondaries are
+        * already waiting on their paca.
         */
        li      r3,1
        LOADADDR(r5,__secondary_hold_spinloop)
        std     r3,0(r5)
        sync
        blr
-#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
+#endif /* CONFIG_SMP */
 
 
 /*
@@ -1992,7 +1993,7 @@ _GLOBAL(smp_release_cpus)
  */
        .section ".bss"
 
-       .align  12
+       .align  PAGE_SHIFT
 
        .globl  empty_zero_page
 empty_zero_page:
index 954395d..8abd2ad 100644 (file)
@@ -31,7 +31,7 @@
 
 extern void power4_idle(void);
 
-int default_idle(void)
+void default_idle(void)
 {
        long oldval;
        unsigned int cpu = smp_processor_id();
@@ -64,11 +64,9 @@ int default_idle(void)
                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
-
-       return 0;
 }
 
-int native_idle(void)
+void native_idle(void)
 {
        while (1) {
                ppc64_runlatch_off();
@@ -85,8 +83,6 @@ int native_idle(void)
                    system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
-
-       return 0;
 }
 
 void cpu_idle(void)
index 9c6facc..ed876a5 100644 (file)
@@ -395,7 +395,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
-       case DIE_GPF:
        case DIE_PAGE_FAULT:
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
index 1d297e0..633324b 100644 (file)
@@ -23,8 +23,7 @@
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/iommu.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #ifdef DEBUG
 #define DBG(x...) printk(x)
@@ -276,7 +275,7 @@ static void __init setup_u3_agp(struct pci_controller* hose)
 {
        /* On G5, we move AGP up to high bus number so we don't need
         * to reassign bus numbers for HT. If we ever have P2P bridges
-        * on AGP, we'll have to move pci_assign_all_busses to the
+        * on AGP, we'll have to move pci_assign_all_buses to the
         * pci_controller structure so we enable it for AGP and not for
         * HT childs.
         * We hard code the address because of the different size of
@@ -360,7 +359,7 @@ static int __init add_bridge(struct device_node *dev)
 
        /* Interpret the "ranges" property */
        /* This also maps the I/O region and sets isa_io/mem_base */
-       pci_process_bridge_OF_ranges(hose, dev);
+       pci_process_bridge_OF_ranges(hose, dev, primary);
        pci_setup_phb_io(hose, primary);
 
        /* Fixup "bus-range" OF property */
index fc05674..a107ed6 100644 (file)
@@ -59,8 +59,8 @@
 #include <asm/time.h>
 #include <asm/of_device.h>
 #include <asm/lmb.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
+#include <asm/udbg.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -70,7 +70,7 @@
 
 extern int maple_set_rtc_time(struct rtc_time *tm);
 extern void maple_get_rtc_time(struct rtc_time *tm);
-extern void maple_get_boot_time(struct rtc_time *tm);
+extern unsigned long maple_get_boot_time(void);
 extern void maple_calibrate_decr(void);
 extern void maple_pci_init(void);
 extern void maple_pcibios_fixup(void);
index d65210a..445cb74 100644 (file)
@@ -156,8 +156,9 @@ int maple_set_rtc_time(struct rtc_time *tm)
        return 0;
 }
 
-void __init maple_get_boot_time(struct rtc_time *tm)
+unsigned long __init maple_get_boot_time(void)
 {
+       struct rtc_time tm;
        struct device_node *rtcs;
 
        rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
@@ -170,6 +171,8 @@ void __init maple_get_boot_time(struct rtc_time *tm)
                       "legacy address (0x%x)\n", maple_rtc_addr);
        }
        
-       maple_get_rtc_time(tm);
+       maple_get_rtc_time(&tm);
+       return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
+                     tm.tm_hour, tm.tm_min, tm.tm_sec);
 }
 
index e7241ad..a33448c 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
+#include <asm/thread_info.h>
 
        .text
 
@@ -64,44 +65,6 @@ _GLOBAL(get_srr1)
 _GLOBAL(get_sp)
        mr      r3,r1
        blr
-               
-#ifdef CONFIG_PPC_ISERIES
-/* unsigned long local_save_flags(void) */
-_GLOBAL(local_get_flags)
-       lbz     r3,PACAPROCENABLED(r13)
-       blr
-
-/* unsigned long local_irq_disable(void) */
-_GLOBAL(local_irq_disable)
-       lbz     r3,PACAPROCENABLED(r13)
-       li      r4,0
-       stb     r4,PACAPROCENABLED(r13)
-       blr                     /* Done */
-
-/* void local_irq_restore(unsigned long flags) */      
-_GLOBAL(local_irq_restore)
-       lbz     r5,PACAPROCENABLED(r13)
-        /* Check if things are setup the way we want _already_. */
-       cmpw    0,r3,r5
-       beqlr
-       /* are we enabling interrupts? */
-       cmpdi   0,r3,0
-       stb     r3,PACAPROCENABLED(r13)
-       beqlr
-       /* Check pending interrupts */
-       /*   A decrementer, IPI or PMC interrupt may have occurred
-        *   while we were in the hypervisor (which enables) */
-       ld      r4,PACALPPACA+LPPACAANYINT(r13)
-       cmpdi   r4,0
-       beqlr
-
-       /*
-        * Handle pending interrupts in interrupt context
-        */
-       li      r0,0x5555
-       sc
-       blr
-#endif /* CONFIG_PPC_ISERIES */
 
 #ifdef CONFIG_IRQSTACKS
 _GLOBAL(call_do_softirq)
@@ -329,7 +292,7 @@ _GLOBAL(__flush_dcache_icache)
 
 /* Flush the dcache */
        ld      r7,PPC64_CACHES@toc(r2)
-       clrrdi  r3,r3,12                    /* Page align */
+       clrrdi  r3,r3,PAGE_SHIFT                    /* Page align */
        lwz     r4,DCACHEL1LINESPERPAGE(r7)     /* Get # dcache lines per page */
        lwz     r5,DCACHEL1LINESIZE(r7)         /* Get dcache line size */
        mr      r6,r3
@@ -778,6 +741,13 @@ _GLOBAL(giveup_altivec)
 _GLOBAL(__setup_cpu_power3)
        blr
 
+_GLOBAL(execve)
+       li      r0,__NR_execve
+       sc
+       bnslr
+       neg     r3,r3
+       blr
+
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
@@ -959,11 +929,11 @@ _GLOBAL(sys_call_table32)
        .llong .sys_write
        .llong .compat_sys_open         /* 5 */
        .llong .sys_close
-       .llong .sys32_waitpid
-       .llong .sys32_creat
+       .llong .compat_sys_waitpid
+       .llong .compat_sys_creat
        .llong .sys_link
        .llong .sys_unlink              /* 10 */
-       .llong .sys32_execve
+       .llong .compat_sys_execve
        .llong .sys_chdir
        .llong .compat_sys_time
        .llong .sys_mknod
@@ -978,20 +948,20 @@ _GLOBAL(sys_call_table32)
        .llong .sys_setuid
        .llong .sys_getuid
        .llong .compat_sys_stime        /* 25 */
-       .llong .sys32_ptrace
+       .llong .compat_sys_ptrace
        .llong .sys_alarm
        .llong .sys_ni_syscall          /* old fstat syscall */
-       .llong .sys32_pause
-       .llong .compat_sys_utime                /* 30 */
+       .llong .compat_sys_pause
+       .llong .compat_sys_utime        /* 30 */
        .llong .sys_ni_syscall          /* old stty syscall */
        .llong .sys_ni_syscall          /* old gtty syscall */
-       .llong .sys32_access
-       .llong .sys32_nice
+       .llong .compat_sys_access
+       .llong .compat_sys_nice
        .llong .sys_ni_syscall          /* 35 - old ftime syscall */
        .llong .sys_sync
-       .llong .sys32_kill
+       .llong .compat_sys_kill
        .llong .sys_rename
-       .llong .sys32_mkdir
+       .llong .compat_sys_mkdir
        .llong .sys_rmdir               /* 40 */
        .llong .sys_dup
        .llong .sys_pipe
@@ -1009,35 +979,35 @@ _GLOBAL(sys_call_table32)
        .llong .compat_sys_ioctl
        .llong .compat_sys_fcntl                /* 55 */
        .llong .sys_ni_syscall          /* old mpx syscall */
-       .llong .sys32_setpgid
+       .llong .compat_sys_setpgid
        .llong .sys_ni_syscall          /* old ulimit syscall */
-       .llong .sys32_olduname
-       .llong .sys32_umask             /* 60 */
+       .llong .sys_olduname
+       .llong .compat_sys_umask        /* 60 */
        .llong .sys_chroot
        .llong .sys_ustat
        .llong .sys_dup2
        .llong .sys_getppid
        .llong .sys_getpgrp             /* 65 */
        .llong .sys_setsid
-       .llong .sys32_sigaction
+       .llong .compat_sys_sigaction
        .llong .sys_sgetmask
-       .llong .sys32_ssetmask
+       .llong .compat_sys_ssetmask
        .llong .sys_setreuid            /* 70 */
        .llong .sys_setregid
        .llong .ppc32_sigsuspend
        .llong .compat_sys_sigpending
-       .llong .sys32_sethostname
-       .llong .compat_sys_setrlimit            /* 75 */
+       .llong .compat_sys_sethostname
+       .llong .compat_sys_setrlimit    /* 75 */
        .llong .compat_sys_old_getrlimit
        .llong .compat_sys_getrusage
-       .llong .sys32_gettimeofday
-       .llong .sys32_settimeofday
-       .llong .sys32_getgroups         /* 80 */
-       .llong .sys32_setgroups
+       .llong .compat_sys_gettimeofday
+       .llong .compat_sys_settimeofday
+       .llong .compat_sys_getgroups    /* 80 */
+       .llong .compat_sys_setgroups
        .llong .sys_ni_syscall          /* old select syscall */
        .llong .sys_symlink
        .llong .sys_ni_syscall          /* old lstat syscall */
-       .llong .sys32_readlink          /* 85 */
+       .llong .compat_sys_readlink     /* 85 */
        .llong .sys_uselib
        .llong .sys_swapon
        .llong .sys_reboot
@@ -1048,35 +1018,35 @@ _GLOBAL(sys_call_table32)
        .llong .sys_ftruncate
        .llong .sys_fchmod
        .llong .sys_fchown              /* 95 */
-       .llong .sys32_getpriority
-       .llong .sys32_setpriority
+       .llong .compat_sys_getpriority
+       .llong .compat_sys_setpriority
        .llong .sys_ni_syscall          /* old profil syscall */
        .llong .compat_sys_statfs
-       .llong .compat_sys_fstatfs              /* 100 */
+       .llong .compat_sys_fstatfs      /* 100 */
        .llong .sys_ni_syscall          /* old ioperm syscall */
        .llong .compat_sys_socketcall
-       .llong .sys32_syslog
+       .llong .compat_sys_syslog
        .llong .compat_sys_setitimer
-       .llong .compat_sys_getitimer            /* 105 */
+       .llong .compat_sys_getitimer    /* 105 */
        .llong .compat_sys_newstat
        .llong .compat_sys_newlstat
        .llong .compat_sys_newfstat
-       .llong .sys32_uname
+       .llong .sys_uname
        .llong .sys_ni_syscall          /* 110 old iopl syscall */
        .llong .sys_vhangup
        .llong .sys_ni_syscall          /* old idle syscall */
        .llong .sys_ni_syscall          /* old vm86 syscall */
        .llong .compat_sys_wait4
        .llong .sys_swapoff             /* 115 */
-       .llong .sys32_sysinfo
+       .llong .compat_sys_sysinfo
        .llong .sys32_ipc
        .llong .sys_fsync
        .llong .ppc32_sigreturn
        .llong .ppc_clone               /* 120 */
-       .llong .sys32_setdomainname
-       .llong .ppc64_newuname
+       .llong .compat_sys_setdomainname
+       .llong .ppc_newuname
        .llong .sys_ni_syscall          /* old modify_ldt syscall */
-       .llong .sys32_adjtimex
+       .llong .compat_sys_adjtimex
        .llong .sys_mprotect            /* 125 */
        .llong .compat_sys_sigprocmask
        .llong .sys_ni_syscall          /* old create_module syscall */
@@ -1084,36 +1054,36 @@ _GLOBAL(sys_call_table32)
        .llong .sys_delete_module
        .llong .sys_ni_syscall          /* 130 old get_kernel_syms syscall */
        .llong .sys_quotactl
-       .llong .sys32_getpgid
+       .llong .compat_sys_getpgid
        .llong .sys_fchdir
        .llong .sys_bdflush
-       .llong .sys32_sysfs             /* 135 */
+       .llong .compat_sys_sysfs        /* 135 */
        .llong .ppc64_personality
        .llong .sys_ni_syscall          /* for afs_syscall */
        .llong .sys_setfsuid
        .llong .sys_setfsgid
        .llong .sys_llseek              /* 140 */
-        .llong .sys32_getdents
+        .llong .compat_sys_getdents
        .llong .ppc32_select
        .llong .sys_flock
        .llong .sys_msync
        .llong .compat_sys_readv        /* 145 */
        .llong .compat_sys_writev
-       .llong .sys32_getsid
+       .llong .compat_sys_getsid
        .llong .sys_fdatasync
-       .llong .sys32_sysctl
+       .llong .compat_sys_sysctl
        .llong .sys_mlock               /* 150 */
        .llong .sys_munlock
        .llong .sys_mlockall
        .llong .sys_munlockall
-       .llong .sys32_sched_setparam
-       .llong .sys32_sched_getparam    /* 155 */
-       .llong .sys32_sched_setscheduler
-       .llong .sys32_sched_getscheduler
+       .llong .compat_sys_sched_setparam
+       .llong .compat_sys_sched_getparam       /* 155 */
+       .llong .compat_sys_sched_setscheduler
+       .llong .compat_sys_sched_getscheduler
        .llong .sys_sched_yield
-       .llong .sys32_sched_get_priority_max
-       .llong .sys32_sched_get_priority_min  /* 160 */
-       .llong .sys32_sched_rr_get_interval
+       .llong .compat_sys_sched_get_priority_max
+       .llong .compat_sys_sched_get_priority_min  /* 160 */
+       .llong .compat_sys_sched_rr_get_interval
        .llong .compat_sys_nanosleep
        .llong .sys_mremap
        .llong .sys_setresuid
@@ -1123,36 +1093,36 @@ _GLOBAL(sys_call_table32)
        .llong .compat_sys_nfsservctl
        .llong .sys_setresgid
        .llong .sys_getresgid           /* 170 */
-       .llong .sys32_prctl
+       .llong .compat_sys_prctl
        .llong .ppc32_rt_sigreturn
-       .llong .sys32_rt_sigaction
-       .llong .sys32_rt_sigprocmask
-       .llong .sys32_rt_sigpending     /* 175 */
+       .llong .compat_sys_rt_sigaction
+       .llong .compat_sys_rt_sigprocmask
+       .llong .compat_sys_rt_sigpending     /* 175 */
        .llong .compat_sys_rt_sigtimedwait
-       .llong .sys32_rt_sigqueueinfo
+       .llong .compat_sys_rt_sigqueueinfo
        .llong .ppc32_rt_sigsuspend
-       .llong .sys32_pread64
-       .llong .sys32_pwrite64          /* 180 */
+       .llong .compat_sys_pread64
+       .llong .compat_sys_pwrite64     /* 180 */
        .llong .sys_chown
        .llong .sys_getcwd
        .llong .sys_capget
        .llong .sys_capset
-       .llong .sys32_sigaltstack       /* 185 */
-       .llong .sys32_sendfile
+       .llong .compat_sys_sigaltstack  /* 185 */
+       .llong .compat_sys_sendfile
        .llong .sys_ni_syscall          /* reserved for streams1 */
        .llong .sys_ni_syscall          /* reserved for streams2 */
        .llong .ppc_vfork
        .llong .compat_sys_getrlimit            /* 190 */
-       .llong .sys32_readahead
-       .llong .sys32_mmap2
-       .llong .sys32_truncate64
-       .llong .sys32_ftruncate64
+       .llong .compat_sys_readahead
+       .llong .compat_sys_mmap2
+       .llong .compat_sys_truncate64
+       .llong .compat_sys_ftruncate64
        .llong .sys_stat64              /* 195 */
        .llong .sys_lstat64
        .llong .sys_fstat64
-       .llong .sys32_pciconfig_read
-       .llong .sys32_pciconfig_write
-       .llong .sys32_pciconfig_iobase  /* 200 - pciconfig_iobase */
+       .llong .compat_sys_pciconfig_read
+       .llong .compat_sys_pciconfig_write
+       .llong .compat_sys_pciconfig_iobase     /* 200 - pciconfig_iobase */
        .llong .sys_ni_syscall          /* reserved for MacOnLinux */
        .llong .sys_getdents64
        .llong .sys_pivot_root
@@ -1178,7 +1148,7 @@ _GLOBAL(sys_call_table32)
        .llong .compat_sys_sched_getaffinity
        .llong .sys_ni_syscall
        .llong .sys_ni_syscall          /* 225 - reserved for tux */
-       .llong .sys32_sendfile64
+       .llong .compat_sys_sendfile64
        .llong .compat_sys_io_setup
        .llong .sys_io_destroy
        .llong .compat_sys_io_getevents
@@ -1197,16 +1167,16 @@ _GLOBAL(sys_call_table32)
        .llong .compat_sys_timer_gettime
        .llong .sys_timer_getoverrun
        .llong .sys_timer_delete
-       .llong .compat_sys_clock_settime        /* 245 */
+       .llong .compat_sys_clock_settime/* 245 */
        .llong .compat_sys_clock_gettime
        .llong .compat_sys_clock_getres
        .llong .compat_sys_clock_nanosleep
        .llong .ppc32_swapcontext
-       .llong .sys32_tgkill            /* 250 */
-       .llong .sys32_utimes
+       .llong .compat_sys_tgkill       /* 250 */
+       .llong .compat_sys_utimes
        .llong .compat_sys_statfs64
        .llong .compat_sys_fstatfs64
-       .llong .ppc32_fadvise64_64      /* 32bit only fadvise64_64 */
+       .llong .ppc_fadvise64_64        /* 32bit only fadvise64_64 */
        .llong .ppc_rtas                /* 255 */
        .llong .sys_ni_syscall          /* 256 reserved for sys_debug_setcontext */
        .llong .sys_ni_syscall          /* 257 reserved for vserver */
@@ -1221,12 +1191,12 @@ _GLOBAL(sys_call_table32)
        .llong .compat_sys_mq_notify
        .llong .compat_sys_mq_getsetattr
        .llong .compat_sys_kexec_load
-       .llong .sys32_add_key
-       .llong .sys32_request_key       /* 270 */
+       .llong .compat_sys_add_key
+       .llong .compat_sys_request_key  /* 270 */
        .llong .compat_sys_keyctl
        .llong .compat_sys_waitid
-       .llong .sys32_ioprio_set
-       .llong .sys32_ioprio_get
+       .llong .compat_sys_ioprio_set
+       .llong .compat_sys_ioprio_get
        .llong .sys_inotify_init        /* 275 */
        .llong .sys_inotify_add_watch
        .llong .sys_inotify_rm_watch
@@ -1355,7 +1325,7 @@ _GLOBAL(sys_call_table)
        .llong .sys_ni_syscall
        .llong .ppc_clone               /* 120 */
        .llong .sys_setdomainname
-       .llong .ppc64_newuname
+       .llong .ppc_newuname
        .llong .sys_ni_syscall          /* old modify_ldt syscall */
        .llong .sys_adjtimex
        .llong .sys_mprotect            /* 125 */
index ff4be1d..b2fb674 100644 (file)
@@ -31,8 +31,7 @@
 #include <asm/irq.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -881,9 +880,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
 }
 
 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
-                                           struct device_node *dev)
+                                           struct device_node *dev, int prim)
 {
-       unsigned int *ranges;
+       unsigned int *ranges, pci_space;
        unsigned long size;
        int rlen = 0;
        int memno = 0;
@@ -906,16 +905,39 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
        ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
        while ((rlen -= np * sizeof(unsigned int)) >= 0) {
                res = NULL;
-               pci_addr = (unsigned long)ranges[1] << 32 | ranges[2];
+               pci_space = ranges[0];
+               pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
 
                cpu_phys_addr = ranges[3];
-               if (na == 2)
-                       cpu_phys_addr = cpu_phys_addr << 32 | ranges[4];
+               if (na >= 2)
+                       cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
 
-               size = (unsigned long)ranges[na+3] << 32 | ranges[na+4];
+               size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
+               ranges += np;
                if (size == 0)
                        continue;
-               switch ((ranges[0] >> 24) & 0x3) {
+
+               /* Now consume following elements while they are contiguous */
+               while (rlen >= np * sizeof(unsigned int)) {
+                       unsigned long addr, phys;
+
+                       if (ranges[0] != pci_space)
+                               break;
+                       addr = ((unsigned long)ranges[1] << 32) | ranges[2];
+                       phys = ranges[3];
+                       if (na >= 2)
+                               phys = (phys << 32) | ranges[4];
+                       if (addr != pci_addr + size ||
+                           phys != cpu_phys_addr + size)
+                               break;
+
+                       size += ((unsigned long)ranges[na+3] << 32)
+                               | ranges[na+4];
+                       ranges += np;
+                       rlen -= np * sizeof(unsigned int);
+               }
+
+               switch ((pci_space >> 24) & 0x3) {
                case 1:         /* I/O space */
                        hose->io_base_phys = cpu_phys_addr;
                        hose->pci_io_size = size;
@@ -949,7 +971,6 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        res->sibling = NULL;
                        res->child = NULL;
                }
-               ranges += np;
        }
 }
 
index 54055c8..e1a32f8 100644 (file)
@@ -27,8 +27,7 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/abs_addr.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flag)
index a86389d..493bbe4 100644 (file)
@@ -30,8 +30,7 @@
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/pSeries_reconfig.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 /*
  * Traverse_func that inits the PCI fields of the device node.
index d9e33b7..bdf15db 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * arch/ppc64/kernel/pci_iommu.c
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * 
- * Rewrite, cleanup, new allocation schemes: 
+ *
+ * Rewrite, cleanup, new allocation schemes:
  * Copyright (C) 2004 Olof Johansson, IBM Corporation
  *
  * Dynamic DMA mapping support, platform-independent parts.
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <asm/iommu.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
-#include "pci.h"
-
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iSeries/iSeries_pci.h>
-#endif /* CONFIG_PPC_ISERIES */
+#include <asm/ppc-pci.h>
 
 /*
  * We can use ->sysdata directly and avoid the extra work in
@@ -61,13 +56,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
        } else
                pdev = to_pci_dev(dev);
 
-#ifdef CONFIG_PPC_ISERIES
-       return ISERIES_DEVNODE(pdev)->iommu_table;
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
        return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 }
 
 
diff --git a/arch/ppc64/kernel/pmac.h b/arch/ppc64/kernel/pmac.h
deleted file mode 100644 (file)
index 40e1c50..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __PMAC_H__
-#define __PMAC_H__
-
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-/*
- * Declaration for the various functions exported by the
- * pmac_* files. Mostly for use by pmac_setup
- */
-
-extern void pmac_get_boot_time(struct rtc_time *tm);
-extern void pmac_get_rtc_time(struct rtc_time *tm);
-extern int  pmac_set_rtc_time(struct rtc_time *tm);
-extern void pmac_read_rtc_time(void);
-extern void pmac_calibrate_decr(void);
-
-extern void pmac_pcibios_fixup(void);
-extern void pmac_pci_init(void);
-extern void pmac_setup_pci_dma(void);
-extern void pmac_check_ht_link(void);
-
-extern void pmac_setup_smp(void);
-
-extern unsigned long pmac_ide_get_base(int index);
-extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
-       unsigned long data_port, unsigned long ctrl_port, int *irq);
-
-extern void pmac_nvram_init(void);
-
-#endif /* __PMAC_H__ */
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
deleted file mode 100644 (file)
index eb4e6c3..0000000
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- *  arch/ppc/platforms/pmac_feature.c
- *
- *  Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
- *                          Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- *  TODO:
- *
- *   - Replace mdelay with some schedule loop if possible
- *   - Shorten some obfuscated delays on some routines (like modem
- *     power)
- *   - Refcount some clocks (see darwin)
- *   - Split split split...
- *
- */
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <asm/sections.h>
-#include <asm/errno.h>
-#include <asm/keylargo.h>
-#include <asm/uninorth.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/dbdma.h>
-#include <asm/pci-bridge.h>
-#include <asm/pmac_low_i2c.h>
-
-#undef DEBUG_FEATURE
-
-#ifdef DEBUG_FEATURE
-#define DBG(fmt...) printk(KERN_DEBUG fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/*
- * We use a single global lock to protect accesses. Each driver has
- * to take care of its own locking
- */
-static DEFINE_SPINLOCK(feature_lock  __pmacdata);
-
-#define LOCK(flags)    spin_lock_irqsave(&feature_lock, flags);
-#define UNLOCK(flags)  spin_unlock_irqrestore(&feature_lock, flags);
-
-
-/*
- * Instance of some macio stuffs
- */
-struct macio_chip macio_chips[MAX_MACIO_CHIPS]  __pmacdata;
-
-struct macio_chip* __pmac macio_find(struct device_node* child, int type)
-{
-       while(child) {
-               int     i;
-
-               for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
-                       if (child == macio_chips[i].of_node &&
-                           (!type || macio_chips[i].type == type))
-                               return &macio_chips[i];
-               child = child->parent;
-       }
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(macio_find);
-
-static const char* macio_names[] __pmacdata =
-{
-       "Unknown",
-       "Grand Central",
-       "OHare",
-       "OHareII",
-       "Heathrow",
-       "Gatwick",
-       "Paddington",
-       "Keylargo",
-       "Pangea",
-       "Intrepid",
-       "K2"
-};
-
-
-
-/*
- * Uninorth reg. access. Note that Uni-N regs are big endian
- */
-
-#define UN_REG(r)      (uninorth_base + ((r) >> 2))
-#define UN_IN(r)       (in_be32(UN_REG(r)))
-#define UN_OUT(r,v)    (out_be32(UN_REG(r), (v)))
-#define UN_BIS(r,v)    (UN_OUT((r), UN_IN(r) | (v)))
-#define UN_BIC(r,v)    (UN_OUT((r), UN_IN(r) & ~(v)))
-
-static struct device_node* uninorth_node __pmacdata;
-static u32* uninorth_base __pmacdata;
-static u32 uninorth_rev __pmacdata;
-static void *u3_ht;
-
-extern struct device_node *k2_skiplist[2];
-
-/*
- * For each motherboard family, we have a table of functions pointers
- * that handle the various features.
- */
-
-typedef long (*feature_call)(struct device_node* node, long param, long value);
-
-struct feature_table_entry {
-       unsigned int    selector;
-       feature_call    function;
-};
-
-struct pmac_mb_def
-{
-       const char*                     model_string;
-       const char*                     model_name;
-       int                             model_id;
-       struct feature_table_entry*     features;
-       unsigned long                   board_flags;
-};
-static struct pmac_mb_def pmac_mb __pmacdata;
-
-/*
- * Here are the chip specific feature functions
- */
-
-
-static long __pmac g5_read_gpio(struct device_node* node, long param, long value)
-{
-       struct macio_chip* macio = &macio_chips[0];
-
-       return MACIO_IN8(param);
-}
-
-
-static long __pmac g5_write_gpio(struct device_node* node, long param, long value)
-{
-       struct macio_chip* macio = &macio_chips[0];
-
-       MACIO_OUT8(param, (u8)(value & 0xff));
-       return 0;
-}
-
-static long __pmac g5_gmac_enable(struct device_node* node, long param, long value)
-{
-       struct macio_chip* macio = &macio_chips[0];
-       unsigned long flags;
-
-       if (node == NULL)
-               return -ENODEV;
-
-       LOCK(flags);
-       if (value) {
-               MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
-               mb();
-               k2_skiplist[0] = NULL;
-       } else {
-               k2_skiplist[0] = node;
-               mb();
-               MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
-       }
-       
-       UNLOCK(flags);
-       mdelay(1);
-
-       return 0;
-}
-
-static long __pmac g5_fw_enable(struct device_node* node, long param, long value)
-{
-       struct macio_chip* macio = &macio_chips[0];
-       unsigned long flags;
-
-       if (node == NULL)
-               return -ENODEV;
-
-       LOCK(flags);
-       if (value) {
-               MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
-               mb();
-               k2_skiplist[1] = NULL;
-       } else {
-               k2_skiplist[1] = node;
-               mb();
-               MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
-       }
-       
-       UNLOCK(flags);
-       mdelay(1);
-
-       return 0;
-}
-
-static long __pmac g5_mpic_enable(struct device_node* node, long param, long value)
-{
-       unsigned long flags;
-
-       if (node->parent == NULL || strcmp(node->parent->name, "u3"))
-               return 0;
-
-       LOCK(flags);
-       UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
-       UNLOCK(flags);
-
-       return 0;
-}
-
-static long __pmac g5_eth_phy_reset(struct device_node* node, long param, long value)
-{
-       struct macio_chip* macio = &macio_chips[0];
-       struct device_node *phy;
-       int need_reset;
-
-       /*
-        * We must not reset the combo PHYs, only the BCM5221 found in
-        * the iMac G5.
-        */
-       phy = of_get_next_child(node, NULL);
-       if (!phy)
-               return -ENODEV;
-       need_reset = device_is_compatible(phy, "B5221");
-       of_node_put(phy);
-       if (!need_reset)
-               return 0;
-
-       /* PHY reset is GPIO 29, not in device-tree unfortunately */
-       MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
-                  KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
-       /* Thankfully, this is now always called at a time when we can
-        * schedule by sungem.
-        */
-       msleep(10);
-       MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
-
-       return 0;
-}
-
-static long __pmac g5_i2s_enable(struct device_node *node, long param, long value)
-{
-       /* Very crude implementation for now */
-       struct macio_chip* macio = &macio_chips[0];
-       unsigned long flags;
-
-       if (value == 0)
-               return 0; /* don't disable yet */
-
-       LOCK(flags);
-       MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
-                 KL3_I2S0_CLK18_ENABLE);
-       udelay(10);
-       MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
-                 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
-       udelay(10);
-       MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
-       UNLOCK(flags);
-       udelay(10);
-
-       return 0;
-}
-
-
-#ifdef CONFIG_SMP
-static long __pmac g5_reset_cpu(struct device_node* node, long param, long value)
-{
-       unsigned int reset_io = 0;
-       unsigned long flags;
-       struct macio_chip* macio;
-       struct device_node* np;
-
-       macio = &macio_chips[0];
-       if (macio->type != macio_keylargo2)
-               return -ENODEV;
-
-       np = find_path_device("/cpus");
-       if (np == NULL)
-               return -ENODEV;
-       for (np = np->child; np != NULL; np = np->sibling) {
-               u32* num = (u32 *)get_property(np, "reg", NULL);
-               u32* rst = (u32 *)get_property(np, "soft-reset", NULL);
-               if (num == NULL || rst == NULL)
-                       continue;
-               if (param == *num) {
-                       reset_io = *rst;
-                       break;
-               }
-       }
-       if (np == NULL || reset_io == 0)
-               return -ENODEV;
-
-       LOCK(flags);
-       MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
-       (void)MACIO_IN8(reset_io);
-       udelay(1);
-       MACIO_OUT8(reset_io, 0);
-       (void)MACIO_IN8(reset_io);
-       UNLOCK(flags);
-
-       return 0;
-}
-#endif /* CONFIG_SMP */
-
-/*
- * This can be called from pmac_smp so isn't static
- *
- * This takes the second CPU off the bus on dual CPU machines
- * running UP
- */
-void __pmac g5_phy_disable_cpu1(void)
-{
-       UN_OUT(U3_API_PHY_CONFIG_1, 0);
-}
-
-static long __pmac generic_get_mb_info(struct device_node* node, long param, long value)
-{
-       switch(param) {
-               case PMAC_MB_INFO_MODEL:
-                       return pmac_mb.model_id;
-               case PMAC_MB_INFO_FLAGS:
-                       return pmac_mb.board_flags;
-               case PMAC_MB_INFO_NAME:                 
-                       /* hack hack hack... but should work */
-                       *((const char **)value) = pmac_mb.model_name;
-                       return 0;
-       }
-       return -EINVAL;
-}
-
-
-/*
- * Table definitions
- */
-
-/* Used on any machine
- */
-static struct feature_table_entry any_features[]  __pmacdata = {
-       { PMAC_FTR_GET_MB_INFO,         generic_get_mb_info },
-       { 0, NULL }
-};
-
-/* G5 features
- */
-static struct feature_table_entry g5_features[]  __pmacdata = {
-       { PMAC_FTR_GMAC_ENABLE,         g5_gmac_enable },
-       { PMAC_FTR_1394_ENABLE,         g5_fw_enable },
-       { PMAC_FTR_ENABLE_MPIC,         g5_mpic_enable },
-       { PMAC_FTR_READ_GPIO,           g5_read_gpio },
-       { PMAC_FTR_WRITE_GPIO,          g5_write_gpio },
-       { PMAC_FTR_GMAC_PHY_RESET,      g5_eth_phy_reset },
-       { PMAC_FTR_SOUND_CHIP_ENABLE,   g5_i2s_enable },
-#ifdef CONFIG_SMP
-       { PMAC_FTR_RESET_CPU,           g5_reset_cpu },
-#endif /* CONFIG_SMP */
-       { 0, NULL }
-};
-
-static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
-       {       "PowerMac7,2",                  "PowerMac G5",
-               PMAC_TYPE_POWERMAC_G5,          g5_features,
-               0,
-       },
-       {       "PowerMac7,3",                  "PowerMac G5",
-               PMAC_TYPE_POWERMAC_G5,          g5_features,
-               0,
-       },
-       {       "PowerMac8,1",                  "iMac G5",
-               PMAC_TYPE_IMAC_G5,              g5_features,
-               0,
-       },
-       {       "PowerMac9,1",                  "PowerMac G5",
-               PMAC_TYPE_POWERMAC_G5_U3L,      g5_features,
-               0,
-       },
-       {       "RackMac3,1",                   "XServe G5",
-               PMAC_TYPE_XSERVE_G5,            g5_features,
-               0,
-       },
-};
-
-/*
- * The toplevel feature_call callback
- */
-long __pmac pmac_do_feature_call(unsigned int selector, ...)
-{
-       struct device_node* node;
-       long param, value;
-       int i;
-       feature_call func = NULL;
-       va_list args;
-
-       if (pmac_mb.features)
-               for (i=0; pmac_mb.features[i].function; i++)
-                       if (pmac_mb.features[i].selector == selector) {
-                               func = pmac_mb.features[i].function;
-                               break;
-                       }
-       if (!func)
-               for (i=0; any_features[i].function; i++)
-                       if (any_features[i].selector == selector) {
-                               func = any_features[i].function;
-                               break;
-                       }
-       if (!func)
-               return -ENODEV;
-
-       va_start(args, selector);
-       node = (struct device_node*)va_arg(args, void*);
-       param = va_arg(args, long);
-       value = va_arg(args, long);
-       va_end(args);
-
-       return func(node, param, value);
-}
-
-static int __init probe_motherboard(void)
-{
-       int i;
-       struct macio_chip* macio = &macio_chips[0];
-       const char* model = NULL;
-       struct device_node *dt;
-
-       /* Lookup known motherboard type in device-tree. First try an
-        * exact match on the "model" property, then try a "compatible"
-        * match is none is found.
-        */
-       dt = find_devices("device-tree");
-       if (dt != NULL)
-               model = (const char *) get_property(dt, "model", NULL);
-       for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
-           if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
-               pmac_mb = pmac_mb_defs[i];
-               goto found;
-           }
-       }
-       for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
-           if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
-               pmac_mb = pmac_mb_defs[i];
-               goto found;
-           }
-       }
-
-       /* Fallback to selection depending on mac-io chip type */
-       switch(macio->type) {
-       case macio_keylargo2:
-               pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
-               pmac_mb.model_name = "Unknown K2-based";
-               pmac_mb.features = g5_features;
-               
-       default:
-               return -ENODEV;
-       }
-found:
-       /* Check for "mobile" machine */
-       if (model && (strncmp(model, "PowerBook", 9) == 0
-                  || strncmp(model, "iBook", 5) == 0))
-               pmac_mb.board_flags |= PMAC_MB_MOBILE;
-
-
-       printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
-       return 0;
-}
-
-/* Initialize the Core99 UniNorth host bridge and memory controller
- */
-static void __init probe_uninorth(void)
-{
-       uninorth_node = of_find_node_by_name(NULL, "u3");
-       if (uninorth_node && uninorth_node->n_addrs > 0) {
-               /* Small hack until I figure out if parsing in prom.c is correct. I should
-                * get rid of those pre-parsed junk anyway
-                */
-               unsigned long address = uninorth_node->addrs[0].address;
-               uninorth_base = ioremap(address, 0x40000);
-               uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
-               u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
-       } else
-               uninorth_node = NULL;
-
-       if (!uninorth_node)
-               return;
-
-       printk(KERN_INFO "Found U3 memory controller & host bridge, revision: %d\n",
-              uninorth_rev);
-       printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
-
-}
-
-static void __init probe_one_macio(const char* name, const char* compat, int type)
-{
-       struct device_node*     node;
-       int                     i;
-       volatile u32*           base;
-       u32*                    revp;
-
-       node = find_devices(name);
-       if (!node || !node->n_addrs)
-               return;
-       if (compat)
-               do {
-                       if (device_is_compatible(node, compat))
-                               break;
-                       node = node->next;
-               } while (node);
-       if (!node)
-               return;
-       for(i=0; i<MAX_MACIO_CHIPS; i++) {
-               if (!macio_chips[i].of_node)
-                       break;
-               if (macio_chips[i].of_node == node)
-                       return;
-       }
-       if (i >= MAX_MACIO_CHIPS) {
-               printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
-               printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
-               return;
-       }
-       base = (volatile u32*)ioremap(node->addrs[0].address, node->addrs[0].size);
-       if (!base) {
-               printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
-               return;
-       }
-       if (type == macio_keylargo) {
-               u32* did = (u32 *)get_property(node, "device-id", NULL);
-               if (*did == 0x00000025)
-                       type = macio_pangea;
-               if (*did == 0x0000003e)
-                       type = macio_intrepid;
-       }
-       macio_chips[i].of_node  = node;
-       macio_chips[i].type     = type;
-       macio_chips[i].base     = base;
-       macio_chips[i].flags    = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON;
-       macio_chips[i].name     = macio_names[type];
-       revp = (u32 *)get_property(node, "revision-id", NULL);
-       if (revp)
-               macio_chips[i].rev = *revp;
-       printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
-               macio_names[type], macio_chips[i].rev, macio_chips[i].base);
-}
-
-static int __init
-probe_macios(void)
-{
-       probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
-
-       macio_chips[0].lbus.index = 0;
-       macio_chips[1].lbus.index = 1;
-
-       return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
-}
-
-static void __init
-set_initial_features(void)
-{
-       struct device_node *np;
-
-       if (macio_chips[0].type == macio_keylargo2) {
-#ifndef CONFIG_SMP
-               /* On SMP machines running UP, we have the second CPU eating
-                * bus cycles. We need to take it off the bus. This is done
-                * from pmac_smp for SMP kernels running on one CPU
-                */
-               np = of_find_node_by_type(NULL, "cpu");
-               if (np != NULL)
-                       np = of_find_node_by_type(np, "cpu");
-               if (np != NULL) {
-                       g5_phy_disable_cpu1();
-                       of_node_put(np);
-               }
-#endif /* CONFIG_SMP */
-               /* Enable GMAC for now for PCI probing. It will be disabled
-                * later on after PCI probe
-                */
-               np = of_find_node_by_name(NULL, "ethernet");
-               while(np) {
-                       if (device_is_compatible(np, "K2-GMAC"))
-                               g5_gmac_enable(np, 0, 1);
-                       np = of_find_node_by_name(np, "ethernet");
-               }
-
-               /* Enable FW before PCI probe. Will be disabled later on
-                * Note: We should have a batter way to check that we are
-                * dealing with uninorth internal cell and not a PCI cell
-                * on the external PCI. The code below works though.
-                */
-               np = of_find_node_by_name(NULL, "firewire");
-               while(np) {
-                       if (device_is_compatible(np, "pci106b,5811")) {
-                               macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
-                               g5_fw_enable(np, 0, 1);
-                       }
-                       np = of_find_node_by_name(np, "firewire");
-               }
-       }
-}
-
-void __init
-pmac_feature_init(void)
-{
-       /* Detect the UniNorth memory controller */
-       probe_uninorth();
-
-       /* Probe mac-io controllers */
-       if (probe_macios()) {
-               printk(KERN_WARNING "No mac-io chip found\n");
-               return;
-       }
-
-       /* Setup low-level i2c stuffs */
-       pmac_init_low_i2c();
-
-       /* Probe machine type */
-       if (probe_motherboard())
-               printk(KERN_WARNING "Unknown PowerMac !\n");
-
-       /* Set some initial features (turn off some chips that will
-        * be later turned on)
-        */
-       set_initial_features();
-}
-
-int __init pmac_feature_late_init(void)
-{
-#if 0
-       struct device_node* np;
-
-       /* Request some resources late */
-       if (uninorth_node)
-               request_OF_resource(uninorth_node, 0, NULL);
-       np = find_devices("hammerhead");
-       if (np)
-               request_OF_resource(np, 0, NULL);
-       np = find_devices("interrupt-controller");
-       if (np)
-               request_OF_resource(np, 0, NULL);
-#endif
-       return 0;
-}
-
-device_initcall(pmac_feature_late_init);
-
-#if 0
-static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
-{
-       int     freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
-       int     bits[8] = { 8,16,0,32,2,4,0,0 };
-       int     freq = (frq >> 8) & 0xf;
-
-       if (freqs[freq] == 0)
-               printk("%s: Unknown HT link frequency %x\n", name, freq);
-       else
-               printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
-                      name, freqs[freq],
-                      bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
-}
-#endif
-
-void __init pmac_check_ht_link(void)
-{
-#if 0 /* Disabled for now */
-       u32     ufreq, freq, ucfg, cfg;
-       struct device_node *pcix_node;
-       struct pci_dn *pdn;
-       u8      px_bus, px_devfn;
-       struct pci_controller *px_hose;
-
-       (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
-       ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
-       ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
-       dump_HT_speeds("U3 HyperTransport", cfg, freq);
-
-       pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
-       if (pcix_node == NULL) {
-               printk("No PCI-X bridge found\n");
-               return;
-       }
-       pdn = pcix_node->data;
-       px_hose = pdn->phb;
-       px_bus = pdn->busno;
-       px_devfn = pdn->devfn;
-       
-       early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
-       early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
-       dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
-       early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
-       early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
-       dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
-#endif
-}
-
-/*
- * Early video resume hook
- */
-
-static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
-static void *pmac_early_vresume_data __pmacdata;
-
-void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
-{
-       if (_machine != _MACH_Pmac)
-               return;
-       preempt_disable();
-       pmac_early_vresume_proc = proc;
-       pmac_early_vresume_data = data;
-       preempt_enable();
-}
-EXPORT_SYMBOL(pmac_set_early_video_resume);
-
-
-/*
- * AGP related suspend/resume code
- */
-
-static struct pci_dev *pmac_agp_bridge __pmacdata;
-static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
-static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
-
-void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
-                                int (*suspend)(struct pci_dev *bridge),
-                                int (*resume)(struct pci_dev *bridge))
-{
-       if (suspend || resume) {
-               pmac_agp_bridge = bridge;
-               pmac_agp_suspend = suspend;
-               pmac_agp_resume = resume;
-               return;
-       }
-       if (bridge != pmac_agp_bridge)
-               return;
-       pmac_agp_suspend = pmac_agp_resume = NULL;
-       return;
-}
-EXPORT_SYMBOL(pmac_register_agp_pm);
-
-void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
-{
-       if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
-               return;
-       if (pmac_agp_bridge->bus != dev->bus)
-               return;
-       pmac_agp_suspend(pmac_agp_bridge);
-}
-EXPORT_SYMBOL(pmac_suspend_agp_for_card);
-
-void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
-{
-       if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
-               return;
-       if (pmac_agp_bridge->bus != dev->bus)
-               return;
-       pmac_agp_resume(pmac_agp_bridge);
-}
-EXPORT_SYMBOL(pmac_resume_agp_for_card);
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
deleted file mode 100644 (file)
index dc40a0c..0000000
+++ /dev/null
@@ -1,793 +0,0 @@
-/*
- * Support for PCI bridges found on Power Macintoshes.
- * At present the "bandit" and "chaos" bridges are supported.
- * Fortunately you access configuration space in the same
- * way with either bridge.
- *
- * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
- * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/iommu.h>
-
-#include "pci.h"
-#include "pmac.h"
-
-#define DEBUG
-
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-/* XXX Could be per-controller, but I don't think we risk anything by
- * assuming we won't have both UniNorth and Bandit */
-static int has_uninorth;
-static struct pci_controller *u3_agp;
-struct device_node *k2_skiplist[2];
-
-static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
-{
-       for (; node != 0;node = node->sibling) {
-               int * bus_range;
-               unsigned int *class_code;
-               int len;
-
-               /* For PCI<->PCI bridges or CardBus bridges, we go down */
-               class_code = (unsigned int *) get_property(node, "class-code", NULL);
-               if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
-                       (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
-                       continue;
-               bus_range = (int *) get_property(node, "bus-range", &len);
-               if (bus_range != NULL && len > 2 * sizeof(int)) {
-                       if (bus_range[1] > higher)
-                               higher = bus_range[1];
-               }
-               higher = fixup_one_level_bus_range(node->child, higher);
-       }
-       return higher;
-}
-
-/* This routine fixes the "bus-range" property of all bridges in the
- * system since they tend to have their "last" member wrong on macs
- *
- * Note that the bus numbers manipulated here are OF bus numbers, they
- * are not Linux bus numbers.
- */
-static void __init fixup_bus_range(struct device_node *bridge)
-{
-       int * bus_range;
-       int len;
-
-       /* Lookup the "bus-range" property for the hose */
-       bus_range = (int *) get_property(bridge, "bus-range", &len);
-       if (bus_range == NULL || len < 2 * sizeof(int)) {
-               printk(KERN_WARNING "Can't get bus-range for %s\n",
-                              bridge->full_name);
-               return;
-       }
-       bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
-}
-
-/*
- * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
- *
- * The "Bandit" version is present in all early PCI PowerMacs,
- * and up to the first ones using Grackle. Some machines may
- * have 2 bandit controllers (2 PCI busses).
- *
- * "Chaos" is used in some "Bandit"-type machines as a bridge
- * for the separate display bus. It is accessed the same
- * way as bandit, but cannot be probed for devices. It therefore
- * has its own config access functions.
- *
- * The "UniNorth" version is present in all Core99 machines
- * (iBook, G4, new IMacs, and all the recent Apple machines).
- * It contains 3 controllers in one ASIC.
- *
- * The U3 is the bridge used on G5 machines. It contains on
- * AGP bus which is dealt with the old UniNorth access routines
- * and an HyperTransport bus which uses its own set of access
- * functions.
- */
-
-#define MACRISC_CFA0(devfn, off)       \
-       ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
-       | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
-       | (((unsigned long)(off)) & 0xFCUL))
-
-#define MACRISC_CFA1(bus, devfn, off)  \
-       ((((unsigned long)(bus)) << 16) \
-       |(((unsigned long)(devfn)) << 8) \
-       |(((unsigned long)(off)) & 0xFCUL) \
-       |1UL)
-
-static unsigned long __pmac macrisc_cfg_access(struct pci_controller* hose,
-                                              u8 bus, u8 dev_fn, u8 offset)
-{
-       unsigned int caddr;
-
-       if (bus == hose->first_busno) {
-               if (dev_fn < (11 << 3))
-                       return 0;
-               caddr = MACRISC_CFA0(dev_fn, offset);
-       } else
-               caddr = MACRISC_CFA1(bus, dev_fn, offset);
-
-       /* Uninorth will return garbage if we don't read back the value ! */
-       do {
-               out_le32(hose->cfg_addr, caddr);
-       } while (in_le32(hose->cfg_addr) != caddr);
-
-       offset &= has_uninorth ? 0x07 : 0x03;
-       return ((unsigned long)hose->cfg_data) + offset;
-}
-
-static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
-                                     int offset, int len, u32 *val)
-{
-       struct pci_controller *hose;
-       unsigned long addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               *val = in_8((u8 *)addr);
-               break;
-       case 2:
-               *val = in_le16((u16 *)addr);
-               break;
-       default:
-               *val = in_le32((u32 *)addr);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
-                                      int offset, int len, u32 val)
-{
-       struct pci_controller *hose;
-       unsigned long addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               out_8((u8 *)addr, val);
-               (void) in_8((u8 *)addr);
-               break;
-       case 2:
-               out_le16((u16 *)addr, val);
-               (void) in_le16((u16 *)addr);
-               break;
-       default:
-               out_le32((u32 *)addr, val);
-               (void) in_le32((u32 *)addr);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops macrisc_pci_ops =
-{
-       macrisc_read_config,
-       macrisc_write_config
-};
-
-/*
- * These versions of U3 HyperTransport config space access ops do not
- * implement self-view of the HT host yet
- */
-
-/*
- * This function deals with some "special cases" devices.
- *
- *  0 -> No special case
- *  1 -> Skip the device but act as if the access was successfull
- *       (return 0xff's on reads, eventually, cache config space
- *       accesses in a later version)
- * -1 -> Hide the device (unsuccessful acess)
- */
-static int u3_ht_skip_device(struct pci_controller *hose,
-                            struct pci_bus *bus, unsigned int devfn)
-{
-       struct device_node *busdn, *dn;
-       int i;
-
-       /* We only allow config cycles to devices that are in OF device-tree
-        * as we are apparently having some weird things going on with some
-        * revs of K2 on recent G5s
-        */
-       if (bus->self)
-               busdn = pci_device_to_OF_node(bus->self);
-       else
-               busdn = hose->arch_data;
-       for (dn = busdn->child; dn; dn = dn->sibling)
-               if (dn->data && PCI_DN(dn)->devfn == devfn)
-                       break;
-       if (dn == NULL)
-               return -1;
-
-       /*
-        * When a device in K2 is powered down, we die on config
-        * cycle accesses. Fix that here.
-        */
-       for (i=0; i<2; i++)
-               if (k2_skiplist[i] == dn)
-                       return 1;
-
-       return 0;
-}
-
-#define U3_HT_CFA0(devfn, off)         \
-               ((((unsigned long)devfn) << 8) | offset)
-#define U3_HT_CFA1(bus, devfn, off)    \
-               (U3_HT_CFA0(devfn, off) \
-               + (((unsigned long)bus) << 16) \
-               + 0x01000000UL)
-
-static unsigned long __pmac u3_ht_cfg_access(struct pci_controller* hose,
-                                            u8 bus, u8 devfn, u8 offset)
-{
-       if (bus == hose->first_busno) {
-               /* For now, we don't self probe U3 HT bridge */
-               if (PCI_SLOT(devfn) == 0)
-                       return 0;
-               return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
-       } else
-               return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
-}
-
-static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
-                                   int offset, int len, u32 *val)
-{
-       struct pci_controller *hose;
-       unsigned long addr;
-
-
-       hose = pci_bus_to_host(bus);      
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (u3_ht_skip_device(hose, bus, devfn)) {
-       case 0:
-               break;
-       case 1:
-               switch (len) {
-               case 1:
-                       *val = 0xff; break;
-               case 2:
-                       *val = 0xffff; break;
-               default:
-                       *val = 0xfffffffful; break;
-               }
-               return PCIBIOS_SUCCESSFUL;
-       default:
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       }
-
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               *val = in_8((u8 *)addr);
-               break;
-       case 2:
-               *val = in_le16((u16 *)addr);
-               break;
-       default:
-               *val = in_le32((u32 *)addr);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
-                                    int offset, int len, u32 val)
-{
-       struct pci_controller *hose;
-       unsigned long addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (u3_ht_skip_device(hose, bus, devfn)) {
-       case 0:
-               break;
-       case 1:
-               return PCIBIOS_SUCCESSFUL;
-       default:
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       }
-
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               out_8((u8 *)addr, val);
-               (void) in_8((u8 *)addr);
-               break;
-       case 2:
-               out_le16((u16 *)addr, val);
-               (void) in_le16((u16 *)addr);
-               break;
-       default:
-               out_le32((u32 *)addr, val);
-               (void) in_le32((u32 *)addr);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops u3_ht_pci_ops =
-{
-       u3_ht_read_config,
-       u3_ht_write_config
-};
-
-static void __init setup_u3_agp(struct pci_controller* hose)
-{
-       /* On G5, we move AGP up to high bus number so we don't need
-        * to reassign bus numbers for HT. If we ever have P2P bridges
-        * on AGP, we'll have to move pci_assign_all_busses to the
-        * pci_controller structure so we enable it for AGP and not for
-        * HT childs.
-        * We hard code the address because of the different size of
-        * the reg address cell, we shall fix that by killing struct
-        * reg_property and using some accessor functions instead
-        */
-       hose->first_busno = 0xf0;
-       hose->last_busno = 0xff;
-       has_uninorth = 1;
-       hose->ops = &macrisc_pci_ops;
-       hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
-       hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
-
-       u3_agp = hose;
-}
-
-static void __init setup_u3_ht(struct pci_controller* hose)
-{
-       struct device_node *np = (struct device_node *)hose->arch_data;
-       int i, cur;
-
-       hose->ops = &u3_ht_pci_ops;
-
-       /* We hard code the address because of the different size of
-        * the reg address cell, we shall fix that by killing struct
-        * reg_property and using some accessor functions instead
-        */
-       hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
-
-       /*
-        * /ht node doesn't expose a "ranges" property, so we "remove" regions that
-        * have been allocated to AGP. So far, this version of the code doesn't assign
-        * any of the 0xfxxxxxxx "fine" memory regions to /ht.
-        * We need to fix that sooner or later by either parsing all child "ranges"
-        * properties or figuring out the U3 address space decoding logic and
-        * then read it's configuration register (if any).
-        */
-       hose->io_base_phys = 0xf4000000;
-       hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
-       isa_io_base = pci_io_base = (unsigned long) hose->io_base_virt;
-       hose->io_resource.name = np->full_name;
-       hose->io_resource.start = 0;
-       hose->io_resource.end = 0x003fffff;
-       hose->io_resource.flags = IORESOURCE_IO;
-       hose->pci_mem_offset = 0;
-       hose->first_busno = 0;
-       hose->last_busno = 0xef;
-       hose->mem_resources[0].name = np->full_name;
-       hose->mem_resources[0].start = 0x80000000;
-       hose->mem_resources[0].end = 0xefffffff;
-       hose->mem_resources[0].flags = IORESOURCE_MEM;
-
-       if (u3_agp == NULL) {
-               DBG("U3 has no AGP, using full resource range\n");
-               return;
-       }
-
-       /* We "remove" the AGP resources from the resources allocated to HT, that
-        * is we create "holes". However, that code does assumptions that so far
-        * happen to be true (cross fingers...), typically that resources in the
-        * AGP node are properly ordered
-        */
-       cur = 0;
-       for (i=0; i<3; i++) {
-               struct resource *res = &u3_agp->mem_resources[i];
-               if (res->flags != IORESOURCE_MEM)
-                       continue;
-               /* We don't care about "fine" resources */
-               if (res->start >= 0xf0000000)
-                       continue;
-               /* Check if it's just a matter of "shrinking" us in one direction */
-               if (hose->mem_resources[cur].start == res->start) {
-                       DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
-                           cur, hose->mem_resources[cur].start, res->end + 1);
-                       hose->mem_resources[cur].start = res->end + 1;
-                       continue;
-               }
-               if (hose->mem_resources[cur].end == res->end) {
-                       DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
-                           cur, hose->mem_resources[cur].end, res->start - 1);
-                       hose->mem_resources[cur].end = res->start - 1;
-                       continue;
-               }
-               /* No, it's not the case, we need a hole */
-               if (cur == 2) {
-                       /* not enough resources for a hole, we drop part of the range */
-                       printk(KERN_WARNING "Running out of resources for /ht host !\n");
-                       hose->mem_resources[cur].end = res->start - 1;
-                       continue;
-               }               
-               cur++;
-               DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
-                   cur-1, res->start - 1, cur, res->end + 1);
-               hose->mem_resources[cur].name = np->full_name;
-               hose->mem_resources[cur].flags = IORESOURCE_MEM;
-               hose->mem_resources[cur].start = res->end + 1;
-               hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
-               hose->mem_resources[cur-1].end = res->start - 1;
-       }
-}
-
-static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
-                          struct device_node *dev, int primary)
-{
-       static unsigned int static_lc_ranges[2024];
-       unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
-       unsigned int size;
-       int rlen = 0, orig_rlen;
-       int memno = 0;
-       struct resource *res;
-       int np, na = prom_n_addr_cells(dev);
-
-       np = na + 5;
-
-       /* First we try to merge ranges to fix a problem with some pmacs
-        * that can have more than 3 ranges, fortunately using contiguous
-        * addresses -- BenH
-        */
-       dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
-       if (!dt_ranges)
-               return;
-       /*      lc_ranges = alloc_bootmem(rlen);*/
-       lc_ranges = static_lc_ranges;
-       if (!lc_ranges)
-               return; /* what can we do here ? */
-       memcpy(lc_ranges, dt_ranges, rlen);
-       orig_rlen = rlen;
-
-       /* Let's work on a copy of the "ranges" property instead of damaging
-        * the device-tree image in memory
-        */
-       ranges = lc_ranges;
-       prev = NULL;
-       while ((rlen -= np * sizeof(unsigned int)) >= 0) {
-               if (prev) {
-                       if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
-                               (prev[2] + prev[na+4]) == ranges[2] &&
-                               (prev[na+2] + prev[na+4]) == ranges[na+2]) {
-                               prev[na+4] += ranges[na+4];
-                               ranges[0] = 0;
-                               ranges += np;
-                               continue;
-                       }
-               }
-               prev = ranges;
-               ranges += np;
-       }
-
-       /*
-        * The ranges property is laid out as an array of elements,
-        * each of which comprises:
-        *   cells 0 - 2:       a PCI address
-        *   cells 3 or 3+4:    a CPU physical address
-        *                      (size depending on dev->n_addr_cells)
-        *   cells 4+5 or 5+6:  the size of the range
-        */
-       ranges = lc_ranges;
-       rlen = orig_rlen;
-       while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
-               res = NULL;
-               size = ranges[na+4];
-               switch (ranges[0] >> 24) {
-               case 1:         /* I/O space */
-                       if (ranges[2] != 0)
-                               break;
-                       hose->io_base_phys = ranges[na+2];
-                       /* limit I/O space to 16MB */
-                       if (size > 0x01000000)
-                               size = 0x01000000;
-                       hose->io_base_virt = ioremap(ranges[na+2], size);
-                       if (primary)
-                               isa_io_base = (unsigned long) hose->io_base_virt;
-                       res = &hose->io_resource;
-                       res->flags = IORESOURCE_IO;
-                       res->start = ranges[2];
-                       break;
-               case 2:         /* memory space */
-                       memno = 0;
-                       if (ranges[1] == 0 && ranges[2] == 0
-                           && ranges[na+4] <= (16 << 20)) {
-                               /* 1st 16MB, i.e. ISA memory area */
-#if 0
-                               if (primary)
-                                       isa_mem_base = ranges[na+2];
-#endif
-                               memno = 1;
-                       }
-                       while (memno < 3 && hose->mem_resources[memno].flags)
-                               ++memno;
-                       if (memno == 0)
-                               hose->pci_mem_offset = ranges[na+2] - ranges[2];
-                       if (memno < 3) {
-                               res = &hose->mem_resources[memno];
-                               res->flags = IORESOURCE_MEM;
-                               res->start = ranges[na+2];
-                       }
-                       break;
-               }
-               if (res != NULL) {
-                       res->name = dev->full_name;
-                       res->end = res->start + size - 1;
-                       res->parent = NULL;
-                       res->sibling = NULL;
-                       res->child = NULL;
-               }
-               ranges += np;
-       }
-}
-
-/*
- * We assume that if we have a G3 powermac, we have one bridge called
- * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
- * if we have one or more bandit or chaos bridges, we don't have a MPC106.
- */
-static int __init add_bridge(struct device_node *dev)
-{
-       int len;
-       struct pci_controller *hose;
-       char* disp_name;
-       int *bus_range;
-       int primary = 1;
-       struct property *of_prop;
-
-       DBG("Adding PCI host bridge %s\n", dev->full_name);
-
-       bus_range = (int *) get_property(dev, "bus-range", &len);
-       if (bus_range == NULL || len < 2 * sizeof(int)) {
-               printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
-                       dev->full_name);
-       }
-
-       hose = alloc_bootmem(sizeof(struct pci_controller));
-       if (hose == NULL)
-               return -ENOMEM;
-       pci_setup_pci_controller(hose);
-
-       hose->arch_data = dev;
-       hose->first_busno = bus_range ? bus_range[0] : 0;
-       hose->last_busno = bus_range ? bus_range[1] : 0xff;
-
-       of_prop = alloc_bootmem(sizeof(struct property) +
-                               sizeof(hose->global_number));
-       if (of_prop) {
-               memset(of_prop, 0, sizeof(struct property));
-               of_prop->name = "linux,pci-domain";
-               of_prop->length = sizeof(hose->global_number);
-               of_prop->value = (unsigned char *)&of_prop[1];
-               memcpy(of_prop->value, &hose->global_number, sizeof(hose->global_number));
-               prom_add_property(dev, of_prop);
-       }
-
-       disp_name = NULL;
-       if (device_is_compatible(dev, "u3-agp")) {
-               setup_u3_agp(hose);
-               disp_name = "U3-AGP";
-               primary = 0;
-       } else if (device_is_compatible(dev, "u3-ht")) {
-               setup_u3_ht(hose);
-               disp_name = "U3-HT";
-               primary = 1;
-       }
-       printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
-               disp_name, hose->first_busno, hose->last_busno);
-
-       /* Interpret the "ranges" property */
-       /* This also maps the I/O region and sets isa_io/mem_base */
-       pmac_process_bridge_OF_ranges(hose, dev, primary);
-
-       /* Fixup "bus-range" OF property */
-       fixup_bus_range(dev);
-
-       return 0;
-}
-
-/*
- * We use our own read_irq_line here because PCI_INTERRUPT_PIN is
- * crap on some of Apple ASICs. We unconditionally use the Open Firmware
- * interrupt number as this is always right.
- */
-static int pmac_pci_read_irq_line(struct pci_dev *pci_dev)
-{
-       struct device_node *node;
-
-       node = pci_device_to_OF_node(pci_dev);
-       if (node == NULL)
-               return -1;
-       if (node->n_intrs == 0)
-               return -1;
-       pci_dev->irq = node->intrs[0].line;
-       pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
-
-       return 0;
-}
-
-void __init pmac_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       for_each_pci_dev(dev)
-               pmac_pci_read_irq_line(dev);
-}
-
-static void __init pmac_fixup_phb_resources(void)
-{
-       struct pci_controller *hose, *tmp;
-       
-       list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
-               unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
-               hose->io_resource.start += offset;
-               hose->io_resource.end += offset;
-               printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
-                      hose->global_number,
-                      hose->io_resource.start, hose->io_resource.end);
-       }
-}
-
-void __init pmac_pci_init(void)
-{
-       struct device_node *np, *root;
-       struct device_node *ht = NULL;
-
-       /* Probe root PCI hosts, that is on U3 the AGP host and the
-        * HyperTransport host. That one is actually "kept" around
-        * and actually added last as it's resource management relies
-        * on the AGP resources to have been setup first
-        */
-       root = of_find_node_by_path("/");
-       if (root == NULL) {
-               printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
-               return;
-       }
-       for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
-               if (np->name == NULL)
-                       continue;
-               if (strcmp(np->name, "pci") == 0) {
-                       if (add_bridge(np) == 0)
-                               of_node_get(np);
-               }
-               if (strcmp(np->name, "ht") == 0) {
-                       of_node_get(np);
-                       ht = np;
-               }
-       }
-       of_node_put(root);
-
-       /* Now setup the HyperTransport host if we found any
-        */
-       if (ht && add_bridge(ht) != 0)
-               of_node_put(ht);
-
-       /* Fixup the IO resources on our host bridges as the common code
-        * does it only for childs of the host bridges
-        */
-       pmac_fixup_phb_resources();
-
-       /* Setup the linkage between OF nodes and PHBs */ 
-       pci_devs_phb_init();
-
-       /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
-        * assume there is no P2P bridge on the AGP bus, which should be a
-        * safe assumptions hopefully.
-        */
-       if (u3_agp) {
-               struct device_node *np = u3_agp->arch_data;
-               PCI_DN(np)->busno = 0xf0;
-               for (np = np->child; np; np = np->sibling)
-                       PCI_DN(np)->busno = 0xf0;
-       }
-
-       pmac_check_ht_link();
-
-       /* Tell pci.c to not use the common resource allocation mecanism */
-       pci_probe_only = 1;
-       
-       /* Allow all IO */
-       io_page_mask = -1;
-}
-
-/*
- * Disable second function on K2-SATA, it's broken
- * and disable IO BARs on first one
- */
-static void fixup_k2_sata(struct pci_dev* dev)
-{
-       int i;
-       u16 cmd;
-
-       if (PCI_FUNC(dev->devfn) > 0) {
-               pci_read_config_word(dev, PCI_COMMAND, &cmd);
-               cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
-               pci_write_config_word(dev, PCI_COMMAND, cmd);
-               for (i = 0; i < 6; i++) {
-                       dev->resource[i].start = dev->resource[i].end = 0;
-                       dev->resource[i].flags = 0;
-                       pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
-               }
-       } else {
-               pci_read_config_word(dev, PCI_COMMAND, &cmd);
-               cmd &= ~PCI_COMMAND_IO;
-               pci_write_config_word(dev, PCI_COMMAND, cmd);
-               for (i = 0; i < 5; i++) {
-                       dev->resource[i].start = dev->resource[i].end = 0;
-                       dev->resource[i].flags = 0;
-                       pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
-               }
-       }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
-
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
deleted file mode 100644 (file)
index fa8121d..0000000
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- *  arch/ppc/platforms/setup.c
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Adapted for Power Macintosh by Paul Mackerras
- *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- *
- *  Derived from "arch/alpha/kernel/setup.c"
- *    Copyright (C) 1995 Linus Torvalds
- *
- *  Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- */
-
-/*
- * bootup setup stuff..
- */
-
-#undef DEBUG
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/tty.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/major.h>
-#include <linux/initrd.h>
-#include <linux/vt_kern.h>
-#include <linux/console.h>
-#include <linux/ide.h>
-#include <linux/pci.h>
-#include <linux/adb.h>
-#include <linux/cuda.h>
-#include <linux/pmu.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/prom.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/pci-bridge.h>
-#include <asm/iommu.h>
-#include <asm/machdep.h>
-#include <asm/dma.h>
-#include <asm/btext.h>
-#include <asm/cputable.h>
-#include <asm/pmac_feature.h>
-#include <asm/time.h>
-#include <asm/of_device.h>
-#include <asm/lmb.h>
-#include <asm/smu.h>
-#include <asm/pmc.h>
-
-#include "pmac.h"
-#include "mpic.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-static int current_root_goodness = -1;
-#define DEFAULT_ROOT_DEVICE Root_SDA1  /* sda1 - slightly silly choice */
-
-extern  int powersave_nap;
-int sccdbg;
-
-sys_ctrler_t sys_ctrler;
-EXPORT_SYMBOL(sys_ctrler);
-
-#ifdef CONFIG_PMAC_SMU
-unsigned long smu_cmdbuf_abs;
-EXPORT_SYMBOL(smu_cmdbuf_abs);
-#endif
-
-extern void udbg_init_scc(struct device_node *np);
-
-static void __pmac pmac_show_cpuinfo(struct seq_file *m)
-{
-       struct device_node *np;
-       char *pp;
-       int plen;
-       char* mbname;
-       int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
-                                       PMAC_MB_INFO_MODEL, 0);
-       unsigned int mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
-                                                PMAC_MB_INFO_FLAGS, 0);
-
-       if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
-                             (long)&mbname) != 0)
-               mbname = "Unknown";
-       
-       /* find motherboard type */
-       seq_printf(m, "machine\t\t: ");
-       np = of_find_node_by_path("/");
-       if (np != NULL) {
-               pp = (char *) get_property(np, "model", NULL);
-               if (pp != NULL)
-                       seq_printf(m, "%s\n", pp);
-               else
-                       seq_printf(m, "PowerMac\n");
-               pp = (char *) get_property(np, "compatible", &plen);
-               if (pp != NULL) {
-                       seq_printf(m, "motherboard\t:");
-                       while (plen > 0) {
-                               int l = strlen(pp) + 1;
-                               seq_printf(m, " %s", pp);
-                               plen -= l;
-                               pp += l;
-                       }
-                       seq_printf(m, "\n");
-               }
-               of_node_put(np);
-       } else
-               seq_printf(m, "PowerMac\n");
-
-       /* print parsed model */
-       seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
-       seq_printf(m, "pmac flags\t: %08x\n", mbflags);
-
-       /* Indicate newworld */
-       seq_printf(m, "pmac-generation\t: NewWorld\n");
-}
-
-
-static void __init pmac_setup_arch(void)
-{
-       /* init to some ~sane value until calibrate_delay() runs */
-       loops_per_jiffy = 50000000;
-
-       /* Probe motherboard chipset */
-       pmac_feature_init();
-#if 0
-       /* Lock-enable the SCC channel used for debug */
-       if (sccdbg) {
-               np = of_find_node_by_name(NULL, "escc");
-               if (np)
-                       pmac_call_feature(PMAC_FTR_SCC_ENABLE, np,
-                                         PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
-       }
-#endif
-       /* We can NAP */
-       powersave_nap = 1;
-
-#ifdef CONFIG_ADB_PMU
-       /* Initialize the PMU if any */
-       find_via_pmu();
-#endif
-#ifdef CONFIG_PMAC_SMU
-       /* Initialize the SMU if any */
-       smu_init();
-#endif
-
-       /* Init NVRAM access */
-       pmac_nvram_init();
-
-       /* Setup SMP callback */
-#ifdef CONFIG_SMP
-       pmac_setup_smp();
-#endif
-
-       /* Lookup PCI hosts */
-               pmac_pci_init();
-
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp = &dummy_con;
-#endif
-
-       printk(KERN_INFO "Using native/NAP idle loop\n");
-}
-
-#ifdef CONFIG_SCSI
-void note_scsi_host(struct device_node *node, void *host)
-{
-       /* Obsolete */
-}
-#endif
-
-
-static int initializing = 1;
-
-static int pmac_late_init(void)
-{
-       initializing = 0;
-       return 0;
-}
-
-late_initcall(pmac_late_init);
-
-/* can't be __init - can be called whenever a disk is first accessed */
-void __pmac note_bootable_part(dev_t dev, int part, int goodness)
-{
-       extern dev_t boot_dev;
-       char *p;
-
-       if (!initializing)
-               return;
-       if ((goodness <= current_root_goodness) &&
-           ROOT_DEV != DEFAULT_ROOT_DEVICE)
-               return;
-       p = strstr(saved_command_line, "root=");
-       if (p != NULL && (p == saved_command_line || p[-1] == ' '))
-               return;
-
-       if (!boot_dev || dev == boot_dev) {
-               ROOT_DEV = dev + part;
-               boot_dev = 0;
-               current_root_goodness = goodness;
-       }
-}
-
-static void __pmac pmac_restart(char *cmd)
-{
-       switch(sys_ctrler) {
-#ifdef CONFIG_ADB_PMU
-       case SYS_CTRLER_PMU:
-               pmu_restart();
-               break;
-#endif
-
-#ifdef CONFIG_PMAC_SMU
-       case SYS_CTRLER_SMU:
-               smu_restart();
-               break;
-#endif
-       default:
-               ;
-       }
-}
-
-static void __pmac pmac_power_off(void)
-{
-       switch(sys_ctrler) {
-#ifdef CONFIG_ADB_PMU
-       case SYS_CTRLER_PMU:
-               pmu_shutdown();
-               break;
-#endif
-#ifdef CONFIG_PMAC_SMU
-       case SYS_CTRLER_SMU:
-               smu_shutdown();
-               break;
-#endif
-       default:
-               ;
-       }
-}
-
-static void __pmac pmac_halt(void)
-{
-       pmac_power_off();
-}
-
-#ifdef CONFIG_BOOTX_TEXT
-static void btext_putc(unsigned char c)
-{
-       btext_drawchar(c);
-}
-
-static void __init init_boot_display(void)
-{
-       char *name;
-       struct device_node *np = NULL; 
-       int rc = -ENODEV;
-
-       printk("trying to initialize btext ...\n");
-
-       name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
-       if (name != NULL) {
-               np = of_find_node_by_path(name);
-               if (np != NULL) {
-                       if (strcmp(np->type, "display") != 0) {
-                               printk("boot stdout isn't a display !\n");
-                               of_node_put(np);
-                               np = NULL;
-                       }
-               }
-       }
-       if (np)
-               rc = btext_initialize(np);
-       if (rc == 0)
-               return;
-
-       for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
-               if (get_property(np, "linux,opened", NULL)) {
-                       printk("trying %s ...\n", np->full_name);
-                       rc = btext_initialize(np);
-                       printk("result: %d\n", rc);
-               }
-               if (rc == 0)
-                       return;
-       }
-}
-#endif /* CONFIG_BOOTX_TEXT */
-
-/* 
- * Early initialization.
- */
-static void __init pmac_init_early(void)
-{
-       DBG(" -> pmac_init_early\n");
-
-       /* Initialize hash table, from now on, we can take hash faults
-        * and call ioremap
-        */
-       hpte_init_native();
-
-       /* Init SCC */
-               if (strstr(cmd_line, "sccdbg")) {
-               sccdbg = 1;
-                       udbg_init_scc(NULL);
-               }
-#ifdef CONFIG_BOOTX_TEXT
-       else {
-               init_boot_display();
-
-               udbg_putc = btext_putc;
-       }
-#endif /* CONFIG_BOOTX_TEXT */
-
-       /* Setup interrupt mapping options */
-       ppc64_interrupt_controller = IC_OPEN_PIC;
-
-       iommu_init_early_u3();
-
-       DBG(" <- pmac_init_early\n");
-}
-
-static int pmac_u3_cascade(struct pt_regs *regs, void *data)
-{
-       return mpic_get_one_irq((struct mpic *)data, regs);
-}
-
-static __init void pmac_init_IRQ(void)
-{
-        struct device_node *irqctrler  = NULL;
-        struct device_node *irqctrler2 = NULL;
-       struct device_node *np = NULL;
-       struct mpic *mpic1, *mpic2;
-
-       /* We first try to detect Apple's new Core99 chipset, since mac-io
-        * is quite different on those machines and contains an IBM MPIC2.
-        */
-       while ((np = of_find_node_by_type(np, "open-pic")) != NULL) {
-               struct device_node *parent = of_get_parent(np);
-               if (parent && !strcmp(parent->name, "u3"))
-                       irqctrler2 = of_node_get(np);
-               else
-                       irqctrler = of_node_get(np);
-               of_node_put(parent);
-       }
-       if (irqctrler != NULL && irqctrler->n_addrs > 0) {
-               unsigned char senses[128];
-
-               printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
-                      (unsigned int)irqctrler->addrs[0].address);
-
-               prom_get_irq_senses(senses, 0, 128);
-               mpic1 = mpic_alloc(irqctrler->addrs[0].address,
-                                  MPIC_PRIMARY | MPIC_WANTS_RESET,
-                                  0, 0, 128, 256, senses, 128, " K2-MPIC  ");
-               BUG_ON(mpic1 == NULL);
-               mpic_init(mpic1);               
-
-               if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
-                   irqctrler2->n_addrs > 0) {
-                       printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
-                              (u32)irqctrler2->addrs[0].address,
-                              irqctrler2->intrs[0].line);
-
-                       pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
-                       prom_get_irq_senses(senses, 128, 128 + 128);
-
-                       /* We don't need to set MPIC_BROKEN_U3 here since we don't have
-                        * hypertransport interrupts routed to it
-                        */
-                       mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
-                                          MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
-                                          0, 128, 128, 0, senses, 128, " U3-MPIC  ");
-                       BUG_ON(mpic2 == NULL);
-                       mpic_init(mpic2);
-                       mpic_setup_cascade(irqctrler2->intrs[0].line,
-                                          pmac_u3_cascade, mpic2);
-               }
-       }
-       of_node_put(irqctrler);
-       of_node_put(irqctrler2);
-}
-
-static void __init pmac_progress(char *s, unsigned short hex)
-{
-       if (sccdbg) {
-               udbg_puts(s);
-               udbg_puts("\n");
-       }
-#ifdef CONFIG_BOOTX_TEXT
-       else if (boot_text_mapped) {
-               btext_drawstring(s);
-               btext_drawstring("\n");
-       }
-#endif /* CONFIG_BOOTX_TEXT */
-}
-
-/*
- * pmac has no legacy IO, anything calling this function has to
- * fail or bad things will happen
- */
-static int pmac_check_legacy_ioport(unsigned int baseport)
-{
-       return -ENODEV;
-}
-
-static int __init pmac_declare_of_platform_devices(void)
-{
-       struct device_node *np, *npp;
-
-       npp = of_find_node_by_name(NULL, "u3");
-       if (npp) {
-               for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
-                       if (strncmp(np->name, "i2c", 3) == 0) {
-                               of_platform_device_create(np, "u3-i2c", NULL);
-                               of_node_put(np);
-                               break;
-                       }
-               }
-               of_node_put(npp);
-       }
-        npp = of_find_node_by_type(NULL, "smu");
-        if (npp) {
-               of_platform_device_create(npp, "smu", NULL);
-               of_node_put(npp);
-       }
-
-       return 0;
-}
-
-device_initcall(pmac_declare_of_platform_devices);
-
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-static int __init pmac_probe(int platform)
-{
-       if (platform != PLATFORM_POWERMAC)
-               return 0;
-       /*
-        * On U3, the DART (iommu) must be allocated now since it
-        * has an impact on htab_initialize (due to the large page it
-        * occupies having to be broken up so the DART itself is not
-        * part of the cacheable linar mapping
-        */
-       alloc_u3_dart_table();
-
-#ifdef CONFIG_PMAC_SMU
-       /*
-        * SMU based G5s need some memory below 2Gb, at least the current
-        * driver needs that. We have to allocate it now. We allocate 4k
-        * (1 small page) for now.
-        */
-       smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
-#endif /* CONFIG_PMAC_SMU */
-
-       return 1;
-}
-
-static int pmac_probe_mode(struct pci_bus *bus)
-{
-       struct device_node *node = bus->sysdata;
-
-       /* We need to use normal PCI probing for the AGP bus,
-          since the device for the AGP bridge isn't in the tree. */
-       if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
-               return PCI_PROBE_NORMAL;
-
-       return PCI_PROBE_DEVTREE;
-}
-
-struct machdep_calls __initdata pmac_md = {
-#ifdef CONFIG_HOTPLUG_CPU
-       .cpu_die                = generic_mach_cpu_die,
-#endif
-       .probe                  = pmac_probe,
-       .setup_arch             = pmac_setup_arch,
-       .init_early             = pmac_init_early,
-               .get_cpuinfo            = pmac_show_cpuinfo,
-       .init_IRQ               = pmac_init_IRQ,
-       .get_irq                = mpic_get_irq,
-       .pcibios_fixup          = pmac_pcibios_fixup,
-       .pci_probe_mode         = pmac_probe_mode,
-       .restart                = pmac_restart,
-       .power_off              = pmac_power_off,
-       .halt                   = pmac_halt,
-               .get_boot_time          = pmac_get_boot_time,
-               .set_rtc_time           = pmac_set_rtc_time,
-               .get_rtc_time           = pmac_get_rtc_time,
-       .calibrate_decr         = pmac_calibrate_decr,
-       .feature_call           = pmac_do_feature_call,
-       .progress               = pmac_progress,
-       .check_legacy_ioport    = pmac_check_legacy_ioport,
-       .idle_loop              = native_idle,
-       .enable_pmcs            = power4_enable_pmcs,
-};
diff --git a/arch/ppc64/kernel/pmac_smp.c b/arch/ppc64/kernel/pmac_smp.c
deleted file mode 100644 (file)
index a23de37..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * SMP support for power macintosh.
- *
- * We support both the old "powersurge" SMP architecture
- * and the current Core99 (G4 PowerMac) machines.
- *
- * Note that we don't support the very first rev. of
- * Apple/DayStar 2 CPUs board, the one with the funky
- * watchdog. Hopefully, none of these should be there except
- * maybe internally to Apple. I should probably still add some
- * code to detect this card though and disable SMP. --BenH.
- *
- * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
- * and Ben Herrenschmidt <benh@kernel.crashing.org>.
- *
- * Support for DayStar quad CPU cards
- * Copyright (C) XLR8, Inc. 1994-2000
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/time.h>
-#include <asm/cacheflush.h>
-#include <asm/keylargo.h>
-#include <asm/pmac_low_i2c.h>
-
-#include "mpic.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-extern void pmac_secondary_start_1(void);
-extern void pmac_secondary_start_2(void);
-extern void pmac_secondary_start_3(void);
-
-extern struct smp_ops_t *smp_ops;
-
-static void (*pmac_tb_freeze)(int freeze);
-static struct device_node *pmac_tb_clock_chip_host;
-static u8 pmac_tb_pulsar_addr;
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase;
-
-static void smp_core99_cypress_tb_freeze(int freeze)
-{
-       u8 data;
-       int rc;
-
-       /* Strangely, the device-tree says address is 0xd2, but darwin
-        * accesses 0xd0 ...
-        */
-       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
-       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
-                              0xd0 | pmac_low_i2c_read,
-                              0x81, &data, 1);
-       if (rc != 0)
-               goto bail;
-
-       data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
-
-               pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
-       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
-                              0xd0 | pmac_low_i2c_write,
-                              0x81, &data, 1);
-
- bail:
-       if (rc != 0) {
-               printk("Cypress Timebase %s rc: %d\n",
-                      freeze ? "freeze" : "unfreeze", rc);
-               panic("Timebase freeze failed !\n");
-       }
-}
-
-static void smp_core99_pulsar_tb_freeze(int freeze)
-{
-       u8 data;
-       int rc;
-
-       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
-       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
-                              pmac_tb_pulsar_addr | pmac_low_i2c_read,
-                              0x2e, &data, 1);
-       if (rc != 0)
-               goto bail;
-
-       data = (data & 0x88) | (freeze ? 0x11 : 0x22);
-
-       pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
-       rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
-                              pmac_tb_pulsar_addr | pmac_low_i2c_write,
-                              0x2e, &data, 1);
- bail:
-       if (rc != 0) {
-               printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
-                      freeze ? "freeze" : "unfreeze", rc);
-               panic("Timebase freeze failed !\n");
-       }
-}
-
-
-static void smp_core99_give_timebase(void)
-{
-       /* Open i2c bus for synchronous access */
-       if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
-               panic("Can't open i2c for TB sync !\n");
-
-       spin_lock(&timebase_lock);
-       (*pmac_tb_freeze)(1);
-       mb();
-       timebase = get_tb();
-       spin_unlock(&timebase_lock);
-
-       while (timebase)
-               barrier();
-
-       spin_lock(&timebase_lock);
-       (*pmac_tb_freeze)(0);
-       spin_unlock(&timebase_lock);
-
-       /* Close i2c bus */
-       pmac_low_i2c_close(pmac_tb_clock_chip_host);
-}
-
-
-static void __devinit smp_core99_take_timebase(void)
-{
-       while (!timebase)
-               barrier();
-       spin_lock(&timebase_lock);
-       set_tb(timebase >> 32, timebase & 0xffffffff);
-       timebase = 0;
-       spin_unlock(&timebase_lock);
-}
-
-
-static int __init smp_core99_probe(void)
-{
-       struct device_node *cpus;       
-       struct device_node *cc; 
-       int ncpus = 0;
-
-       /* Maybe use systemconfiguration here ? */
-       if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
-
-       /* Count CPUs in the device-tree */
-               for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
-               ++ncpus;
-
-       printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
-
-       /* Nothing more to do if less than 2 of them */
-       if (ncpus <= 1)
-               return 1;
-
-       /* HW sync only on these platforms */
-       if (!machine_is_compatible("PowerMac7,2") &&
-           !machine_is_compatible("PowerMac7,3") &&
-           !machine_is_compatible("RackMac3,1"))
-               goto nohwsync;
-
-       /* Look for the clock chip */
-       for (cc = NULL; (cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL;) {
-               struct device_node *p = of_get_parent(cc);
-               u32 *reg;
-               int ok;
-               ok = p && device_is_compatible(p, "uni-n-i2c");
-               if (!ok)
-                       goto next;
-               reg = (u32 *)get_property(cc, "reg", NULL);
-               if (reg == NULL)
-                       goto next;
-               switch (*reg) {
-               case 0xd2:
-                       if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
-                               pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
-                               pmac_tb_pulsar_addr = 0xd2;
-                               printk(KERN_INFO "Timebase clock is Pulsar chip\n");
-                       } else if (device_is_compatible(cc, "cy28508")) {
-                               pmac_tb_freeze = smp_core99_cypress_tb_freeze;
-                               printk(KERN_INFO "Timebase clock is Cypress chip\n");
-                       }
-                       break;
-               case 0xd4:
-                       pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
-                       pmac_tb_pulsar_addr = 0xd4;
-                       printk(KERN_INFO "Timebase clock is Pulsar chip\n");
-                       break;
-               }
-               if (pmac_tb_freeze != NULL) {
-                       pmac_tb_clock_chip_host = p;
-                       smp_ops->give_timebase = smp_core99_give_timebase;
-                       smp_ops->take_timebase = smp_core99_take_timebase;
-                       of_node_put(cc);
-                       of_node_put(p);
-                       break;
-               }
-       next:
-               of_node_put(p);
-       }
-
- nohwsync:
-       mpic_request_ipis();
-
-       return ncpus;
-}
-
-static void __init smp_core99_kick_cpu(int nr)
-{
-       int save_vector, j;
-       unsigned long new_vector;
-       unsigned long flags;
-       volatile unsigned int *vector
-                = ((volatile unsigned int *)(KERNELBASE+0x100));
-
-       if (nr < 1 || nr > 3)
-               return;
-       if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
-
-       local_irq_save(flags);
-       local_irq_disable();
-
-       /* Save reset vector */
-       save_vector = *vector;
-
-       /* Setup fake reset vector that does    
-        *   b .pmac_secondary_start - KERNELBASE
-        */
-       switch(nr) {
-       case 1:
-               new_vector = (unsigned long)pmac_secondary_start_1;
-               break;
-       case 2:
-               new_vector = (unsigned long)pmac_secondary_start_2;
-               break;                  
-       case 3:
-       default:
-               new_vector = (unsigned long)pmac_secondary_start_3;
-               break;
-       }
-       *vector = 0x48000002 + (new_vector - KERNELBASE);
-
-       /* flush data cache and inval instruction cache */
-       flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
-
-       /* Put some life in our friend */
-       pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
-       paca[nr].cpu_start = 1;
-
-       /* FIXME: We wait a bit for the CPU to take the exception, I should
-        * instead wait for the entry code to set something for me. Well,
-        * ideally, all that crap will be done in prom.c and the CPU left
-        * in a RAM-based wait loop like CHRP.
-        */
-       for (j = 1; j < 1000000; j++)
-               mb();
-
-       /* Restore our exception vector */
-       *vector = save_vector;
-       flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
-
-       local_irq_restore(flags);
-       if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
-}
-
-static void __init smp_core99_setup_cpu(int cpu_nr)
-{
-       /* Setup MPIC */
-       mpic_setup_this_cpu();
-
-       if (cpu_nr == 0) {
-               extern void g5_phy_disable_cpu1(void);
-
-               /* If we didn't start the second CPU, we must take
-                * it off the bus
-                */
-               if (num_online_cpus() < 2)              
-                       g5_phy_disable_cpu1();
-               if (ppc_md.progress) ppc_md.progress("smp_core99_setup_cpu 0 done", 0x349);
-       }
-}
-
-struct smp_ops_t core99_smp_ops __pmacdata = {
-       .message_pass   = smp_mpic_message_pass,
-       .probe          = smp_core99_probe,
-       .kick_cpu       = smp_core99_kick_cpu,
-       .setup_cpu      = smp_core99_setup_cpu,
-       .give_timebase  = smp_generic_give_timebase,
-       .take_timebase  = smp_generic_take_timebase,
-};
-
-void __init pmac_setup_smp(void)
-{
-       smp_ops = &core99_smp_ops;
-#ifdef CONFIG_HOTPLUG_CPU
-       smp_ops->cpu_enable = generic_cpu_enable;
-       smp_ops->cpu_disable = generic_cpu_disable;
-       smp_ops->cpu_die = generic_cpu_die;
-#endif
-}
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
deleted file mode 100644 (file)
index 41bbb8c..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Support for periodic interrupts (100 per second) and for getting
- * the current time from the RTC on Power Macintoshes.
- *
- * We use the decrementer register for our periodic interrupts.
- *
- * Paul Mackerras      August 1996.
- * Copyright (C) 1996 Paul Mackerras.
- * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
- *
- */
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/interrupt.h>
-
-#include <asm/sections.h>
-#include <asm/prom.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-#include <asm/time.h>
-#include <asm/nvram.h>
-#include <asm/smu.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-/* Apparently the RTC stores seconds since 1 Jan 1904 */
-#define RTC_OFFSET     2082844800
-
-/*
- * Calibrate the decrementer frequency with the VIA timer 1.
- */
-#define VIA_TIMER_FREQ_6       4700000 /* time 1 frequency * 6 */
-
-extern struct timezone sys_tz;
-extern void to_tm(int tim, struct rtc_time * tm);
-
-void __pmac pmac_get_rtc_time(struct rtc_time *tm)
-{
-       switch(sys_ctrler) {
-#ifdef CONFIG_ADB_PMU
-       case SYS_CTRLER_PMU: {
-               /* TODO: Move that to a function in the PMU driver */
-               struct adb_request req;
-               unsigned int now;
-
-               if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
-                       return;
-               pmu_wait_complete(&req);
-               if (req.reply_len != 4)
-                       printk(KERN_ERR "pmac_get_rtc_time: PMU returned a %d"
-                              " bytes reply\n", req.reply_len);
-               now = (req.reply[0] << 24) + (req.reply[1] << 16)
-                       + (req.reply[2] << 8) + req.reply[3];
-               DBG("get: %u -> %u\n", (int)now, (int)(now - RTC_OFFSET));
-               now -= RTC_OFFSET;
-
-               to_tm(now, tm);
-               tm->tm_year -= 1900;
-               tm->tm_mon -= 1;
-       
-               DBG("-> tm_mday: %d, tm_mon: %d, tm_year: %d, %d:%02d:%02d\n",
-                   tm->tm_mday, tm->tm_mon, tm->tm_year,
-                   tm->tm_hour, tm->tm_min, tm->tm_sec);
-               break;
-       }
-#endif /* CONFIG_ADB_PMU */
-
-#ifdef CONFIG_PMAC_SMU
-       case SYS_CTRLER_SMU:
-               smu_get_rtc_time(tm, 1);
-               break;
-#endif /* CONFIG_PMAC_SMU */
-       default:
-               ;
-       }
-}
-
-int __pmac pmac_set_rtc_time(struct rtc_time *tm)
-{
-       switch(sys_ctrler) {
-#ifdef CONFIG_ADB_PMU
-       case SYS_CTRLER_PMU: {
-               /* TODO: Move that to a function in the PMU driver */
-               struct adb_request req;
-               unsigned int nowtime;
-
-               DBG("set: tm_mday: %d, tm_mon: %d, tm_year: %d,"
-                   " %d:%02d:%02d\n",
-                   tm->tm_mday, tm->tm_mon, tm->tm_year,
-                   tm->tm_hour, tm->tm_min, tm->tm_sec);
-
-               nowtime = mktime(tm->tm_year + 1900, tm->tm_mon + 1,
-                                tm->tm_mday, tm->tm_hour, tm->tm_min,
-                                tm->tm_sec);
-
-               DBG("-> %u -> %u\n", (int)nowtime,
-                   (int)(nowtime + RTC_OFFSET));
-               nowtime += RTC_OFFSET;
-
-               if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
-                               nowtime >> 24, nowtime >> 16,
-                               nowtime >> 8, nowtime) < 0)
-                       return -ENXIO;
-               pmu_wait_complete(&req);
-               if (req.reply_len != 0)
-                       printk(KERN_ERR "pmac_set_rtc_time: PMU returned a %d"
-                              " bytes reply\n", req.reply_len);
-               return 0;
-       }
-#endif /* CONFIG_ADB_PMU */
-
-#ifdef CONFIG_PMAC_SMU
-       case SYS_CTRLER_SMU:
-               return smu_set_rtc_time(tm, 1);
-#endif /* CONFIG_PMAC_SMU */
-       default:
-               return -ENODEV;
-       }
-}
-
-void __init pmac_get_boot_time(struct rtc_time *tm)
-{
-       pmac_get_rtc_time(tm);
-
-#ifdef disabled__CONFIG_NVRAM
-       s32 delta = 0;
-       int dst;
-       
-       delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
-       delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
-       delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
-       if (delta & 0x00800000UL)
-               delta |= 0xFF000000UL;
-       dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
-       printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
-               dst ? "on" : "off");
-#endif
-}
-
-/*
- * Query the OF and get the decr frequency.
- * FIXME: merge this with generic_calibrate_decr
- */
-void __init pmac_calibrate_decr(void)
-{
-       struct device_node *cpu;
-       unsigned int freq, *fp;
-       struct div_result divres;
-
-       /*
-        * The cpu node should have a timebase-frequency property
-        * to tell us the rate at which the decrementer counts.
-        */
-       cpu = find_type_devices("cpu");
-       if (cpu == 0)
-               panic("can't find cpu node in time_init");
-       fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
-       if (fp == 0)
-               panic("can't get cpu timebase frequency");
-       freq = *fp;
-       printk("time_init: decrementer frequency = %u.%.6u MHz\n",
-              freq/1000000, freq%1000000);
-       tb_ticks_per_jiffy = freq / HZ;
-       tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
-       tb_ticks_per_usec = freq / 1000000;
-       tb_to_us = mulhwu_scale_factor(freq, 1000000);
-       div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
-       tb_to_xs = divres.result_low;
-       ppc_tb_freq = freq;
-
-       fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
-       if (fp == 0)
-               panic("can't get cpu processor frequency");
-       ppc_proc_freq = *fp;
-
-       setup_default_decr();
-}
-
index 705742f..84006e2 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/hw_irq.h>
 #include <asm/abs_addr.h>
 #include <asm/cacheflush.h>
-#include <asm/iSeries/HvCallSc.h>
 
 EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@ EXPORT_SYMBOL(__strnlen_user);
 
 EXPORT_SYMBOL(reloc_offset);
 
-#ifdef CONFIG_PPC_ISERIES
-EXPORT_SYMBOL(HvCall0);
-EXPORT_SYMBOL(HvCall1);
-EXPORT_SYMBOL(HvCall2);
-EXPORT_SYMBOL(HvCall3);
-EXPORT_SYMBOL(HvCall4);
-EXPORT_SYMBOL(HvCall5);
-EXPORT_SYMBOL(HvCall6);
-EXPORT_SYMBOL(HvCall7);
-#endif
-
 EXPORT_SYMBOL(_insb);
 EXPORT_SYMBOL(_outsb);
 EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@ EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC_ISERIES
-EXPORT_SYMBOL(local_get_flags);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_restore);
-#endif
-#endif
-
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
index 7035deb..a0866f1 100644 (file)
@@ -78,6 +78,7 @@ typedef int interpret_func(struct device_node *, unsigned long *,
 extern struct rtas_t rtas;
 extern struct lmb lmb;
 extern unsigned long klimit;
+extern unsigned long memory_limit;
 
 static int __initdata dt_root_addr_cells;
 static int __initdata dt_root_size_cells;
@@ -1063,7 +1064,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 {
        u32 *prop;
        u64 *prop64;
-       extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end;
+       extern unsigned long tce_alloc_start, tce_alloc_end;
 
        DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
@@ -1237,7 +1238,7 @@ void __init early_init_devtree(void *params)
        lmb_init();
        scan_flat_dt(early_init_dt_scan_root, NULL);
        scan_flat_dt(early_init_dt_scan_memory, NULL);
-       lmb_enforce_memory_limit();
+       lmb_enforce_memory_limit(memory_limit);
        lmb_analyze();
        systemcfg->physicalMemorySize = lmb_phys_mem_size();
        lmb_reserve(0, __pa(klimit));
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
deleted file mode 100644 (file)
index b1c044c..0000000
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- *  linux/arch/ppc64/kernel/ptrace.c
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/m68k/kernel/ptrace.c"
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@linuxcare.com.au).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/audit.h>
-#include <linux/seccomp.h>
-#include <linux/signal.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/ptrace-common.h>
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
-       /* make sure the single step bit is not set. */
-       clear_single_step(child);
-}
-
-int sys_ptrace(long request, long pid, long addr, long data)
-{
-       struct task_struct *child;
-       int ret = -EPERM;
-
-       lock_kernel();
-       if (request == PTRACE_TRACEME) {
-               /* are we already being traced? */
-               if (current->ptrace & PT_PTRACED)
-                       goto out;
-               ret = security_ptrace(current->parent, current);
-               if (ret)
-                       goto out;
-               /* set the ptrace bit in the process flags. */
-               current->ptrace |= PT_PTRACED;
-               ret = 0;
-               goto out;
-       }
-       ret = -ESRCH;
-       read_lock(&tasklist_lock);
-       child = find_task_by_pid(pid);
-       if (child)
-               get_task_struct(child);
-       read_unlock(&tasklist_lock);
-       if (!child)
-               goto out;
-
-       ret = -EPERM;
-       if (pid == 1)           /* you may not mess with init */
-               goto out_tsk;
-
-       if (request == PTRACE_ATTACH) {
-               ret = ptrace_attach(child);
-               goto out_tsk;
-       }
-
-       ret = ptrace_check_attach(child, request == PTRACE_KILL);
-       if (ret < 0)
-               goto out_tsk;
-
-       switch (request) {
-       /* when I and D space are separate, these will need to be fixed. */
-       case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-       case PTRACE_PEEKDATA: {
-               unsigned long tmp;
-               int copied;
-
-               copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-               ret = -EIO;
-               if (copied != sizeof(tmp))
-                       break;
-               ret = put_user(tmp,(unsigned long __user *) data);
-               break;
-       }
-
-       /* read the word at location addr in the USER area. */
-       case PTRACE_PEEKUSR: {
-               unsigned long index;
-               unsigned long tmp;
-
-               ret = -EIO;
-               /* convert to index and check */
-               index = (unsigned long) addr >> 3;
-               if ((addr & 7) || (index > PT_FPSCR))
-                       break;
-
-               if (index < PT_FPR0) {
-                       tmp = get_reg(child, (int)index);
-               } else {
-                       flush_fp_to_thread(child);
-                       tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
-               }
-               ret = put_user(tmp,(unsigned long __user *) data);
-               break;
-       }
-
-       /* If I and D space are separate, this will have to be fixed. */
-       case PTRACE_POKETEXT: /* write the word at location addr. */
-       case PTRACE_POKEDATA:
-               ret = 0;
-               if (access_process_vm(child, addr, &data, sizeof(data), 1)
-                               == sizeof(data))
-                       break;
-               ret = -EIO;
-               break;
-
-       /* write the word at location addr in the USER area */
-       case PTRACE_POKEUSR: {
-               unsigned long index;
-
-               ret = -EIO;
-               /* convert to index and check */
-               index = (unsigned long) addr >> 3;
-               if ((addr & 7) || (index > PT_FPSCR))
-                       break;
-
-               if (index == PT_ORIG_R3)
-                       break;
-               if (index < PT_FPR0) {
-                       ret = put_reg(child, index, data);
-               } else {
-                       flush_fp_to_thread(child);
-                       ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
-                       ret = 0;
-               }
-               break;
-       }
-
-       case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-       case PTRACE_CONT: { /* restart after signal. */
-               ret = -EIO;
-               if (!valid_signal(data))
-                       break;
-               if (request == PTRACE_SYSCALL)
-                       set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               else
-                       clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               child->exit_code = data;
-               /* make sure the single step bit is not set. */
-               clear_single_step(child);
-               wake_up_process(child);
-               ret = 0;
-               break;
-       }
-
-       /*
-        * make the child exit.  Best I can do is send it a sigkill.
-        * perhaps it should be put in the status that it wants to
-        * exit.
-        */
-       case PTRACE_KILL: {
-               ret = 0;
-               if (child->exit_state == EXIT_ZOMBIE)   /* already dead */
-                       break;
-               child->exit_code = SIGKILL;
-               /* make sure the single step bit is not set. */
-               clear_single_step(child);
-               wake_up_process(child);
-               break;
-       }
-
-       case PTRACE_SINGLESTEP: {  /* set the trap flag. */
-               ret = -EIO;
-               if (!valid_signal(data))
-                       break;
-               clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               set_single_step(child);
-               child->exit_code = data;
-               /* give it a chance to run. */
-               wake_up_process(child);
-               ret = 0;
-               break;
-       }
-
-       case PTRACE_GET_DEBUGREG: {
-               ret = -EINVAL;
-               /* We only support one DABR and no IABRS at the moment */
-               if (addr > 0)
-                       break;
-               ret = put_user(child->thread.dabr,
-                              (unsigned long __user *)data);
-               break;
-       }
-
-       case PTRACE_SET_DEBUGREG:
-               ret = ptrace_set_debugreg(child, addr, data);
-               break;
-
-       case PTRACE_DETACH:
-               ret = ptrace_detach(child, data);
-               break;
-
-       case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
-               int i;
-               unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-               unsigned long __user *tmp = (unsigned long __user *)addr;
-
-               for (i = 0; i < 32; i++) {
-                       ret = put_user(*reg, tmp);
-                       if (ret)
-                               break;
-                       reg++;
-                       tmp++;
-               }
-               break;
-       }
-
-       case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
-               int i;
-               unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-               unsigned long __user *tmp = (unsigned long __user *)addr;
-
-               for (i = 0; i < 32; i++) {
-                       ret = get_user(*reg, tmp);
-                       if (ret)
-                               break;
-                       reg++;
-                       tmp++;
-               }
-               break;
-       }
-
-       case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
-               int i;
-               unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-               unsigned long __user *tmp = (unsigned long __user *)addr;
-
-               flush_fp_to_thread(child);
-
-               for (i = 0; i < 32; i++) {
-                       ret = put_user(*reg, tmp);
-                       if (ret)
-                               break;
-                       reg++;
-                       tmp++;
-               }
-               break;
-       }
-
-       case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
-               int i;
-               unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-               unsigned long __user *tmp = (unsigned long __user *)addr;
-
-               flush_fp_to_thread(child);
-
-               for (i = 0; i < 32; i++) {
-                       ret = get_user(*reg, tmp);
-                       if (ret)
-                               break;
-                       reg++;
-                       tmp++;
-               }
-               break;
-       }
-
-#ifdef CONFIG_ALTIVEC
-       case PTRACE_GETVRREGS:
-               /* Get the child altivec register state. */
-               flush_altivec_to_thread(child);
-               ret = get_vrregs((unsigned long __user *)data, child);
-               break;
-
-       case PTRACE_SETVRREGS:
-               /* Set the child altivec register state. */
-               flush_altivec_to_thread(child);
-               ret = set_vrregs(child, (unsigned long __user *)data);
-               break;
-#endif
-
-       default:
-               ret = ptrace_request(child, request, addr, data);
-               break;
-       }
-out_tsk:
-       put_task_struct(child);
-out:
-       unlock_kernel();
-       return ret;
-}
-
-static void do_syscall_trace(void)
-{
-       /* the 0x80 provides a way for the tracing parent to distinguish
-          between a syscall stop and SIGTRAP delivery */
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-                                ? 0x80 : 0));
-
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
-}
-
-void do_syscall_trace_enter(struct pt_regs *regs)
-{
-       secure_computing(regs->gpr[0]);
-
-       if (test_thread_flag(TIF_SYSCALL_TRACE)
-           && (current->ptrace & PT_PTRACED))
-               do_syscall_trace();
-
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(current,
-                                   test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
-                                   regs->gpr[0],
-                                   regs->gpr[3], regs->gpr[4],
-                                   regs->gpr[5], regs->gpr[6]);
-
-}
-
-void do_syscall_trace_leave(struct pt_regs *regs)
-{
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(current, 
-                                  (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
-                                  regs->result);
-
-       if ((test_thread_flag(TIF_SYSCALL_TRACE)
-            || test_thread_flag(TIF_SINGLESTEP))
-           && (current->ptrace & PT_PTRACED))
-               do_syscall_trace();
-}
index 1f3ff86..5bdd5b0 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/bitops.h>
+#include <linux/rtc.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
index 5e8eb33..36adab5 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/delay.h>
 #include <asm/uaccess.h>
 #include <asm/systemcfg.h>
+#include <asm/ppcdebug.h>
 
 struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
 
index 4a9719b..3ad15c9 100644 (file)
@@ -38,9 +38,8 @@
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
 #include <asm/rtas.h>
-
-#include "mpic.h"
-#include "pci.h"
+#include <asm/mpic.h>
+#include <asm/ppc-pci.h>
 
 /* RTAS tokens */
 static int read_pci_config;
@@ -401,7 +400,7 @@ unsigned long __init find_and_init_phbs(void)
                if (!phb)
                        continue;
 
-               pci_process_bridge_OF_ranges(phb, node);
+               pci_process_bridge_OF_ranges(phb, node, 0);
                pci_setup_phb_io(phb, index == 0);
 #ifdef CONFIG_PPC_PSERIES
                if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
@@ -451,7 +450,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
        if (!phb)
                return NULL;
 
-       pci_process_bridge_OF_ranges(phb, dn);
+       pci_process_bridge_OF_ranges(phb, dn, primary);
 
        pci_setup_phb_io_dynamic(phb, primary);
        of_node_put(root);
index 6ff52bc..79e7ed2 100644 (file)
 #include <asm/time.h>
 #include <asm/rtas.h>
 
-#include <asm/iSeries/mf.h>
 #include <asm/machdep.h>
 
-extern int piranha_simulator;
-
 /*
  *     We sponge a minor off of the misc major. No need slurping
  *     up another valuable major dev number for this. If you add
@@ -265,44 +262,10 @@ static int rtc_read_proc(char *page, char **start, off_t off,
         return len;
 }
 
-#ifdef CONFIG_PPC_ISERIES
-/*
- * Get the RTC from the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
-{
-       if (piranha_simulator)
-               return;
-
-       mf_get_rtc(rtc_tm);
-       rtc_tm->tm_mon--;
-}
-
-/*
- * Set the RTC in the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-int iSeries_set_rtc_time(struct rtc_time *tm)
-{
-       mf_set_rtc(tm);
-       return 0;
-}
-
-void iSeries_get_boot_time(struct rtc_time *tm)
-{
-       if ( piranha_simulator )
-               return;
-
-       mf_get_boot_rtc(tm);
-       tm->tm_mon  -= 1;
-}
-#endif
-
 #ifdef CONFIG_PPC_RTAS
 #define MAX_RTC_WAIT 5000      /* 5 sec */
 #define RTAS_CLOCK_BUSY (-2)
-void rtas_get_boot_time(struct rtc_time *rtc_tm)
+unsigned long rtas_get_boot_time(void)
 {
        int ret[8];
        int error, wait_time;
@@ -322,15 +285,10 @@ void rtas_get_boot_time(struct rtc_time *rtc_tm)
        if (error != 0 && printk_ratelimit()) {
                printk(KERN_WARNING "error: reading the clock failed (%d)\n",
                        error);
-               return;
+               return 0;
        }
 
-       rtc_tm->tm_sec = ret[5];
-       rtc_tm->tm_min = ret[4];
-       rtc_tm->tm_hour = ret[3];
-       rtc_tm->tm_mday = ret[2];
-       rtc_tm->tm_mon = ret[1] - 1;
-       rtc_tm->tm_year = ret[0] - 1900;
+       return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
 }
 
 /* NOTE: get_rtc_time will get an error if executed in interrupt context
index 5ac48bd..44ee6eb 100644 (file)
@@ -58,6 +58,7 @@
 #include <asm/mmu.h>
 #include <asm/lmb.h>
 #include <asm/iSeries/ItLpNaca.h>
+#include <asm/firmware.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -153,7 +154,7 @@ struct screen_info screen_info = {
        .orig_video_points = 16
 };
 
-#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 
 static int smt_enabled_cmdline;
 
@@ -306,15 +307,13 @@ static void __init setup_cpu_maps(void)
 
        systemcfg->processorCount = num_present_cpus();
 }
-#endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */
-
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#endif /* CONFIG_SMP */
 
 extern struct machdep_calls pSeries_md;
 extern struct machdep_calls pmac_md;
 extern struct machdep_calls maple_md;
 extern struct machdep_calls bpa_md;
+extern struct machdep_calls iseries_md;
 
 /* Ultimately, stuff them in an elf section like initcalls... */
 static struct machdep_calls __initdata *machines[] = {
@@ -330,6 +329,9 @@ static struct machdep_calls __initdata *machines[] = {
 #ifdef CONFIG_PPC_BPA
        &bpa_md,
 #endif
+#ifdef CONFIG_PPC_ISERIES
+       &iseries_md,
+#endif
        NULL
 };
 
@@ -401,7 +403,8 @@ void __init early_setup(unsigned long dt_ptr)
        /*
         * Initialize stab / SLB management
         */
-       stab_initialize(lpaca->stab_real);
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               stab_initialize(lpaca->stab_real);
 
        /*
         * Initialize the MMU Hash table and create the linear mapping
@@ -532,8 +535,6 @@ static void __init check_for_initrd(void)
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 /*
  * Do some initial setup of the system.  The parameters are those which 
  * were passed in from the bootloader.
@@ -542,14 +543,6 @@ void __init setup_system(void)
 {
        DBG(" -> setup_system()\n");
 
-#ifdef CONFIG_PPC_ISERIES
-       /* pSeries systems are identified in prom.c via OF. */
-       if (itLpNaca.xLparInstalled == 1)
-               systemcfg->platform = PLATFORM_ISERIES_LPAR;
-
-       ppc_md.init_early();
-#else /* CONFIG_PPC_ISERIES */
-
        /*
         * Unflatten the device-tree passed by prom_init or kexec
         */
@@ -592,6 +585,10 @@ void __init setup_system(void)
         */
        finish_device_tree();
 
+#ifdef CONFIG_BOOTX_TEXT
+       init_boot_display();
+#endif
+
        /*
         * Initialize xmon
         */
@@ -607,9 +604,8 @@ void __init setup_system(void)
        strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
 
        parse_early_param();
-#endif /* !CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
+#ifdef CONFIG_SMP
        /*
         * iSeries has already initialized the cpu maps at this point.
         */
@@ -619,7 +615,7 @@ void __init setup_system(void)
         * we can map physical -> logical CPU ids
         */
        smp_release_cpus();
-#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */
+#endif
 
        printk("Starting Linux PPC64 %s\n", system_utsname.version);
 
@@ -711,8 +707,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_id == NR_CPUS) {
                seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
 
-               if (ppc_md.get_cpuinfo != NULL)
-                       ppc_md.get_cpuinfo(m);
+               if (ppc_md.show_cpuinfo != NULL)
+                       ppc_md.show_cpuinfo(m);
 
                return 0;
        }
@@ -1047,6 +1043,10 @@ void __init setup_arch(char **cmdline_p)
        /* initialize the syscall map in systemcfg */
        setup_syscall_map();
 
+#ifdef CONFIG_DUMMY_CONSOLE
+       conswitchp = &dummy_con;
+#endif
+
        ppc_md.setup_arch();
 
        /* Use the default idle loop if the platform hasn't provided one. */
@@ -1091,15 +1091,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
        printk("[terminate]%04x %s\n", src, msg);
 }
 
-/* This should only be called on processor 0 during calibrate decr */
-void __init setup_default_decr(void)
-{
-       struct paca_struct *lpaca = get_paca();
-
-       lpaca->default_decr = tb_ticks_per_jiffy;
-       lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
-}
-
 #ifndef CONFIG_PPC_ISERIES
 /*
  * This function can be used by platforms to "find" legacy serial ports.
index 793b562..017c129 100644 (file)
@@ -45,8 +45,7 @@
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/abs_addr.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -70,28 +69,6 @@ void smp_call_function_interrupt(void);
 int smt_enabled_at_boot = 1;
 
 #ifdef CONFIG_MPIC
-void smp_mpic_message_pass(int target, int msg)
-{
-       /* make sure we're sending something that translates to an IPI */
-       if ( msg > 0x3 ){
-               printk("SMP %d: smp_message_pass: unknown msg %d\n",
-                      smp_processor_id(), msg);
-               return;
-       }
-       switch ( target )
-       {
-       case MSG_ALL:
-               mpic_send_ipi(msg, 0xffffffff);
-               break;
-       case MSG_ALL_BUT_SELF:
-               mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
-               break;
-       default:
-               mpic_send_ipi(msg, 1 << target);
-               break;
-       }
-}
-
 int __init smp_mpic_probe(void)
 {
        int nr_cpus;
@@ -128,21 +105,6 @@ void __devinit smp_generic_kick_cpu(int nr)
 
 #endif /* CONFIG_MPIC */
 
-static void __init smp_space_timers(unsigned int max_cpus)
-{
-       int i;
-       unsigned long offset = tb_ticks_per_jiffy / max_cpus;
-       unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
-
-       for_each_cpu(i) {
-               if (i != boot_cpuid) {
-                       paca[i].next_jiffy_update_tb =
-                               previous_tb + offset;
-                       previous_tb = paca[i].next_jiffy_update_tb;
-               }
-       }
-}
-
 void smp_message_recv(int msg, struct pt_regs *regs)
 {
        switch(msg) {
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
deleted file mode 100644 (file)
index 7467ae5..0000000
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- *  linux/arch/ppc64/kernel/traps.c
- *
- *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- *  Modified by Cort Dougan (cort@cs.nmt.edu)
- *  and Paul Mackerras (paulus@cs.anu.edu.au)
- */
-
-/*
- * This file handles the architecture-dependent parts of hardware exceptions
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/kprobes.h>
-#include <asm/kdebug.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/ppcdebug.h>
-#include <asm/rtas.h>
-#include <asm/systemcfg.h>
-#include <asm/machdep.h>
-#include <asm/pmc.h>
-
-#ifdef CONFIG_DEBUGGER
-int (*__debugger)(struct pt_regs *regs);
-int (*__debugger_ipi)(struct pt_regs *regs);
-int (*__debugger_bpt)(struct pt_regs *regs);
-int (*__debugger_sstep)(struct pt_regs *regs);
-int (*__debugger_iabr_match)(struct pt_regs *regs);
-int (*__debugger_dabr_match)(struct pt_regs *regs);
-int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-EXPORT_SYMBOL(__debugger);
-EXPORT_SYMBOL(__debugger_ipi);
-EXPORT_SYMBOL(__debugger_bpt);
-EXPORT_SYMBOL(__debugger_sstep);
-EXPORT_SYMBOL(__debugger_iabr_match);
-EXPORT_SYMBOL(__debugger_dabr_match);
-EXPORT_SYMBOL(__debugger_fault_handler);
-#endif
-
-struct notifier_block *ppc64_die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
-
-int register_die_notifier(struct notifier_block *nb)
-{
-       int err = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&die_notifier_lock, flags);
-       err = notifier_chain_register(&ppc64_die_chain, nb);
-       spin_unlock_irqrestore(&die_notifier_lock, flags);
-       return err;
-}
-
-/*
- * Trap & Exception support
- */
-
-static DEFINE_SPINLOCK(die_lock);
-
-int die(const char *str, struct pt_regs *regs, long err)
-{
-       static int die_counter;
-       int nl = 0;
-
-       if (debugger(regs))
-               return 1;
-
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       bust_spinlocks(1);
-       printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
-       nl = 1;
-#endif
-#ifdef CONFIG_SMP
-       printk("SMP NR_CPUS=%d ", NR_CPUS);
-       nl = 1;
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC ");
-       nl = 1;
-#endif
-#ifdef CONFIG_NUMA
-       printk("NUMA ");
-       nl = 1;
-#endif
-       switch(systemcfg->platform) {
-               case PLATFORM_PSERIES:
-                       printk("PSERIES ");
-                       nl = 1;
-                       break;
-               case PLATFORM_PSERIES_LPAR:
-                       printk("PSERIES LPAR ");
-                       nl = 1;
-                       break;
-               case PLATFORM_ISERIES_LPAR:
-                       printk("ISERIES LPAR ");
-                       nl = 1;
-                       break;
-               case PLATFORM_POWERMAC:
-                       printk("POWERMAC ");
-                       nl = 1;
-                       break;
-               case PLATFORM_BPA:
-                       printk("BPA ");
-                       nl = 1;
-                       break;
-       }
-       if (nl)
-               printk("\n");
-       print_modules();
-       show_regs(regs);
-       bust_spinlocks(0);
-       spin_unlock_irq(&die_lock);
-
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-
-       if (panic_on_oops) {
-               printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-               ssleep(5);
-               panic("Fatal exception");
-       }
-       do_exit(SIGSEGV);
-
-       return 0;
-}
-
-void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
-{
-       siginfo_t info;
-
-       if (!user_mode(regs)) {
-               if (die("Exception in kernel mode", regs, signr))
-                       return;
-       }
-
-       memset(&info, 0, sizeof(info));
-       info.si_signo = signr;
-       info.si_code = code;
-       info.si_addr = (void __user *) addr;
-       force_sig_info(signr, &info, current);
-}
-
-void system_reset_exception(struct pt_regs *regs)
-{
-       /* See if any machine dependent calls */
-       if (ppc_md.system_reset_exception)
-               ppc_md.system_reset_exception(regs);
-
-       die("System Reset", regs, 0);
-
-       /* Must die if the interrupt is not recoverable */
-       if (!(regs->msr & MSR_RI))
-               panic("Unrecoverable System Reset");
-
-       /* What should we do here? We could issue a shutdown or hard reset. */
-}
-
-void machine_check_exception(struct pt_regs *regs)
-{
-       int recover = 0;
-
-       /* See if any machine dependent calls */
-       if (ppc_md.machine_check_exception)
-               recover = ppc_md.machine_check_exception(regs);
-
-       if (recover)
-               return;
-
-       if (debugger_fault_handler(regs))
-               return;
-       die("Machine check", regs, 0);
-
-       /* Must die if the interrupt is not recoverable */
-       if (!(regs->msr & MSR_RI))
-               panic("Unrecoverable Machine check");
-}
-
-void unknown_exception(struct pt_regs *regs)
-{
-       printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
-              regs->nip, regs->msr, regs->trap);
-
-       _exception(SIGTRAP, regs, 0, 0);
-}
-
-void instruction_breakpoint_exception(struct pt_regs *regs)
-{
-       if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
-                                       5, SIGTRAP) == NOTIFY_STOP)
-               return;
-       if (debugger_iabr_match(regs))
-               return;
-       _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-}
-
-void __kprobes single_step_exception(struct pt_regs *regs)
-{
-       regs->msr &= ~MSR_SE;  /* Turn off 'trace' bit */
-
-       if (notify_die(DIE_SSTEP, "single_step", regs, 5,
-                                       5, SIGTRAP) == NOTIFY_STOP)
-               return;
-       if (debugger_sstep(regs))
-               return;
-
-       _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
-}
-
-/*
- * After we have successfully emulated an instruction, we have to
- * check if the instruction was being single-stepped, and if so,
- * pretend we got a single-step exception.  This was pointed out
- * by Kumar Gala.  -- paulus
- */
-static inline void emulate_single_step(struct pt_regs *regs)
-{
-       if (regs->msr & MSR_SE)
-               single_step_exception(regs);
-}
-
-static void parse_fpe(struct pt_regs *regs)
-{
-       int code = 0;
-       unsigned long fpscr;
-
-       flush_fp_to_thread(current);
-
-       fpscr = current->thread.fpscr;
-
-       /* Invalid operation */
-       if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
-               code = FPE_FLTINV;
-
-       /* Overflow */
-       else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
-               code = FPE_FLTOVF;
-
-       /* Underflow */
-       else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
-               code = FPE_FLTUND;
-
-       /* Divide by zero */
-       else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
-               code = FPE_FLTDIV;
-
-       /* Inexact result */
-       else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
-               code = FPE_FLTRES;
-
-       _exception(SIGFPE, regs, code, regs->nip);
-}
-
-/*
- * Illegal instruction emulation support.  Return non-zero if we can't
- * emulate, or -EFAULT if the associated memory access caused an access
- * fault.  Return zero on success.
- */
-
-#define INST_MFSPR_PVR         0x7c1f42a6
-#define INST_MFSPR_PVR_MASK    0xfc1fffff
-
-#define INST_DCBA              0x7c0005ec
-#define INST_DCBA_MASK         0x7c0007fe
-
-#define INST_MCRXR             0x7c000400
-#define INST_MCRXR_MASK                0x7c0007fe
-
-static int emulate_instruction(struct pt_regs *regs)
-{
-       unsigned int instword;
-
-       if (!user_mode(regs))
-               return -EINVAL;
-
-       CHECK_FULL_REGS(regs);
-
-       if (get_user(instword, (unsigned int __user *)(regs->nip)))
-               return -EFAULT;
-
-       /* Emulate the mfspr rD, PVR. */
-       if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
-               unsigned int rd;
-
-               rd = (instword >> 21) & 0x1f;
-               regs->gpr[rd] = mfspr(SPRN_PVR);
-               return 0;
-       }
-
-       /* Emulating the dcba insn is just a no-op.  */
-       if ((instword & INST_DCBA_MASK) == INST_DCBA) {
-               static int warned;
-
-               if (!warned) {
-                       printk(KERN_WARNING
-                              "process %d (%s) uses obsolete 'dcba' insn\n",
-                              current->pid, current->comm);
-                       warned = 1;
-               }
-               return 0;
-       }
-
-       /* Emulate the mcrxr insn.  */
-       if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
-               static int warned;
-               unsigned int shift;
-
-               if (!warned) {
-                       printk(KERN_WARNING
-                              "process %d (%s) uses obsolete 'mcrxr' insn\n",
-                              current->pid, current->comm);
-                       warned = 1;
-               }
-
-               shift = (instword >> 21) & 0x1c;
-               regs->ccr &= ~(0xf0000000 >> shift);
-               regs->ccr |= (regs->xer & 0xf0000000) >> shift;
-               regs->xer &= ~0xf0000000;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-/*
- * Look through the list of trap instructions that are used for BUG(),
- * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
- * that the exception was caused by a trap instruction of some kind.
- * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
- * otherwise.
- */
-extern struct bug_entry __start___bug_table[], __stop___bug_table[];
-
-#ifndef CONFIG_MODULES
-#define module_find_bug(x)     NULL
-#endif
-
-struct bug_entry *find_bug(unsigned long bugaddr)
-{
-       struct bug_entry *bug;
-
-       for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-               if (bugaddr == bug->bug_addr)
-                       return bug;
-       return module_find_bug(bugaddr);
-}
-
-static int
-check_bug_trap(struct pt_regs *regs)
-{
-       struct bug_entry *bug;
-       unsigned long addr;
-
-       if (regs->msr & MSR_PR)
-               return 0;       /* not in kernel */
-       addr = regs->nip;       /* address of trap instruction */
-       if (addr < PAGE_OFFSET)
-               return 0;
-       bug = find_bug(regs->nip);
-       if (bug == NULL)
-               return 0;
-       if (bug->line & BUG_WARNING_TRAP) {
-               /* this is a WARN_ON rather than BUG/BUG_ON */
-               printk(KERN_ERR "Badness in %s at %s:%d\n",
-                      bug->function, bug->file,
-                     (unsigned int)bug->line & ~BUG_WARNING_TRAP);
-               show_stack(current, (void *)regs->gpr[1]);
-               return 1;
-       }
-       printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
-              bug->function, bug->file, (unsigned int)bug->line);
-       return 0;
-}
-
-void __kprobes program_check_exception(struct pt_regs *regs)
-{
-       if (debugger_fault_handler(regs))
-               return;
-
-       if (regs->msr & 0x100000) {
-               /* IEEE FP exception */
-               parse_fpe(regs);
-       } else if (regs->msr & 0x20000) {
-               /* trap exception */
-
-               if (notify_die(DIE_BPT, "breakpoint", regs, 5,
-                                       5, SIGTRAP) == NOTIFY_STOP)
-                       return;
-               if (debugger_bpt(regs))
-                       return;
-
-               if (check_bug_trap(regs)) {
-                       regs->nip += 4;
-                       return;
-               }
-               _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-
-       } else {
-               /* Privileged or illegal instruction; try to emulate it. */
-               switch (emulate_instruction(regs)) {
-               case 0:
-                       regs->nip += 4;
-                       emulate_single_step(regs);
-                       break;
-
-               case -EFAULT:
-                       _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-                       break;
-
-               default:
-                       if (regs->msr & 0x40000)
-                               /* priveleged */
-                               _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
-                       else
-                               /* illegal */
-                               _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-                       break;
-               }
-       }
-}
-
-void kernel_fp_unavailable_exception(struct pt_regs *regs)
-{
-       printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
-                         "%lx at %lx\n", regs->trap, regs->nip);
-       die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
-}
-
-void altivec_unavailable_exception(struct pt_regs *regs)
-{
-       if (user_mode(regs)) {
-               /* A user program has executed an altivec instruction,
-                  but this kernel doesn't support altivec. */
-               _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-               return;
-       }
-       printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
-                         "%lx at %lx\n", regs->trap, regs->nip);
-       die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
-}
-
-extern perf_irq_t perf_irq;
-
-void performance_monitor_exception(struct pt_regs *regs)
-{
-       perf_irq(regs);
-}
-
-void alignment_exception(struct pt_regs *regs)
-{
-       int fixed;
-
-       fixed = fix_alignment(regs);
-
-       if (fixed == 1) {
-               regs->nip += 4; /* skip over emulated instruction */
-               emulate_single_step(regs);
-               return;
-       }
-
-       /* Operand address was bad */   
-       if (fixed == -EFAULT) {
-               if (user_mode(regs)) {
-                       _exception(SIGSEGV, regs, SEGV_MAPERR, regs->dar);
-               } else {
-                       /* Search exception table */
-                       bad_page_fault(regs, regs->dar, SIGSEGV);
-               }
-
-               return;
-       }
-
-       _exception(SIGBUS, regs, BUS_ADRALN, regs->nip);
-}
-
-#ifdef CONFIG_ALTIVEC
-void altivec_assist_exception(struct pt_regs *regs)
-{
-       int err;
-       siginfo_t info;
-
-       if (!user_mode(regs)) {
-               printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
-                      " at %lx\n", regs->nip);
-               die("Kernel VMX/Altivec assist exception", regs, SIGILL);
-       }
-
-       flush_altivec_to_thread(current);
-
-       err = emulate_altivec(regs);
-       if (err == 0) {
-               regs->nip += 4;         /* skip emulated instruction */
-               emulate_single_step(regs);
-               return;
-       }
-
-       if (err == -EFAULT) {
-               /* got an error reading the instruction */
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               info.si_code = SEGV_MAPERR;
-               info.si_addr = (void __user *) regs->nip;
-               force_sig_info(SIGSEGV, &info, current);
-       } else {
-               /* didn't recognize the instruction */
-               /* XXX quick hack for now: set the non-Java bit in the VSCR */
-               if (printk_ratelimit())
-                       printk(KERN_ERR "Unrecognized altivec instruction "
-                              "in %s at %lx\n", current->comm, regs->nip);
-               current->thread.vscr.u[3] |= 0x10000;
-       }
-}
-#endif /* CONFIG_ALTIVEC */
-
-/*
- * We enter here if we get an unrecoverable exception, that is, one
- * that happened at a point where the RI (recoverable interrupt) bit
- * in the MSR is 0.  This indicates that SRR0/1 are live, and that
- * we therefore lost state by taking this exception.
- */
-void unrecoverable_exception(struct pt_regs *regs)
-{
-       printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
-              regs->trap, regs->nip);
-       die("Unrecoverable exception", regs, SIGABRT);
-}
-
-/*
- * We enter here if we discover during exception entry that we are
- * running in supervisor mode with a userspace value in the stack pointer.
- */
-void kernel_bad_stack(struct pt_regs *regs)
-{
-       printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
-              regs->gpr[1], regs->nip);
-       die("Bad kernel stack pointer", regs, SIGABRT);
-}
-
-void __init trap_init(void)
-{
-}
index 41ea09c..fba871a 100644 (file)
 #include <asm/abs_addr.h>
 #include <asm/cacheflush.h>
 #include <asm/lmb.h>
-
-#include "pci.h"
+#include <asm/dart.h>
+#include <asm/ppc-pci.h>
 
 extern int iommu_force_on;
 
-/* physical base of DART registers */
-#define DART_BASE        0xf8033000UL
-
-/* Offset from base to control register */
-#define DARTCNTL   0
-/* Offset from base to exception register */
-#define DARTEXCP   0x10
-/* Offset from base to TLB tag registers */
-#define DARTTAG    0x1000
-
-
-/* Control Register fields */
-
-/* base address of table (pfn) */
-#define DARTCNTL_BASE_MASK    0xfffff
-#define DARTCNTL_BASE_SHIFT   12
-
-#define DARTCNTL_FLUSHTLB     0x400
-#define DARTCNTL_ENABLE       0x200
-
-/* size of table in pages */
-#define DARTCNTL_SIZE_MASK    0x1ff
-#define DARTCNTL_SIZE_SHIFT   0
-
-/* DART table fields */
-#define DARTMAP_VALID   0x80000000
-#define DARTMAP_RPNMASK 0x00ffffff
-
 /* Physical base address and size of the DART table */
 unsigned long dart_tablebase; /* exported to htab_initialize */
 static unsigned long dart_tablesize;
@@ -152,18 +124,21 @@ static void dart_build(struct iommu_table *tbl, long index,
 
        DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
 
+       index <<= DART_PAGE_FACTOR;
+       npages <<= DART_PAGE_FACTOR;
+
        dp = ((unsigned int*)tbl->it_base) + index;
        
        /* On U3, all memory is contigous, so we can move this
         * out of the loop.
         */
        while (npages--) {
-               rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
+               rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
 
                *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
 
                rpn++;
-               uaddr += PAGE_SIZE;
+               uaddr += DART_PAGE_SIZE;
        }
 
        dart_dirty = 1;
@@ -181,6 +156,9 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
 
        DBG("dart: free at: %lx, %lx\n", index, npages);
 
+       index <<= DART_PAGE_FACTOR;
+       npages <<= DART_PAGE_FACTOR;
+
        dp  = ((unsigned int *)tbl->it_base) + index;
                
        while (npages--)
@@ -209,10 +187,10 @@ static int dart_init(struct device_node *dart_node)
         * that to work around what looks like a problem with the HT bridge
         * prefetching into invalid pages and corrupting data
         */
-       tmp = lmb_alloc(PAGE_SIZE, PAGE_SIZE);
+       tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
        if (!tmp)
                panic("U3-DART: Cannot allocate spare page!");
-       dart_emptyval = DARTMAP_VALID | ((tmp >> PAGE_SHIFT) & DARTMAP_RPNMASK);
+       dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
 
        /* Map in DART registers. FIXME: Use device node to get base address */
        dart = ioremap(DART_BASE, 0x7000);
@@ -223,8 +201,8 @@ static int dart_init(struct device_node *dart_node)
         * table size and enable bit
         */
        regword = DARTCNTL_ENABLE | 
-               ((dart_tablebase >> PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
-               (((dart_tablesize >> PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
+               ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
+               (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
                                 << DARTCNTL_SIZE_SHIFT);
        dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
 
index 8ae8f20..31b604a 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/unistd.h>
 #include <asm/vdso.h>
+#include <asm/ptrace.h>                /* XXX for __SIGNAL_FRAMESIZE */
 
        .text
 
diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c
deleted file mode 100644 (file)
index cb20762..0000000
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Routines to emulate some Altivec/VMX instructions, specifically
- * those that can trap when given denormalized operands in Java mode.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-
-/* Functions in vector.S */
-extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
-extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
-extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
-extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
-extern void vrefp(vector128 *dst, vector128 *src);
-extern void vrsqrtefp(vector128 *dst, vector128 *src);
-extern void vexptep(vector128 *dst, vector128 *src);
-
-static unsigned int exp2s[8] = {
-       0x800000,
-       0x8b95c2,
-       0x9837f0,
-       0xa5fed7,
-       0xb504f3,
-       0xc5672a,
-       0xd744fd,
-       0xeac0c7
-};
-
-/*
- * Computes an estimate of 2^x.  The `s' argument is the 32-bit
- * single-precision floating-point representation of x.
- */
-static unsigned int eexp2(unsigned int s)
-{
-       int exp, pwr;
-       unsigned int mant, frac;
-
-       /* extract exponent field from input */
-       exp = ((s >> 23) & 0xff) - 127;
-       if (exp > 7) {
-               /* check for NaN input */
-               if (exp == 128 && (s & 0x7fffff) != 0)
-                       return s | 0x400000;    /* return QNaN */
-               /* 2^-big = 0, 2^+big = +Inf */
-               return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
-       }
-       if (exp < -23)
-               return 0x3f800000;      /* 1.0 */
-
-       /* convert to fixed point integer in 9.23 representation */
-       pwr = (s & 0x7fffff) | 0x800000;
-       if (exp > 0)
-               pwr <<= exp;
-       else
-               pwr >>= -exp;
-       if (s & 0x80000000)
-               pwr = -pwr;
-
-       /* extract integer part, which becomes exponent part of result */
-       exp = (pwr >> 23) + 126;
-       if (exp >= 254)
-               return 0x7f800000;
-       if (exp < -23)
-               return 0;
-
-       /* table lookup on top 3 bits of fraction to get mantissa */
-       mant = exp2s[(pwr >> 20) & 7];
-
-       /* linear interpolation using remaining 20 bits of fraction */
-       asm("mulhwu %0,%1,%2" : "=r" (frac)
-           : "r" (pwr << 12), "r" (0x172b83ff));
-       asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
-       mant += frac;
-
-       if (exp >= 0)
-               return mant + (exp << 23);
-
-       /* denormalized result */
-       exp = -exp;
-       mant += 1 << (exp - 1);
-       return mant >> exp;
-}
-
-/*
- * Computes an estimate of log_2(x).  The `s' argument is the 32-bit
- * single-precision floating-point representation of x.
- */
-static unsigned int elog2(unsigned int s)
-{
-       int exp, mant, lz, frac;
-
-       exp = s & 0x7f800000;
-       mant = s & 0x7fffff;
-       if (exp == 0x7f800000) {        /* Inf or NaN */
-               if (mant != 0)
-                       s |= 0x400000;  /* turn NaN into QNaN */
-               return s;
-       }
-       if ((exp | mant) == 0)          /* +0 or -0 */
-               return 0xff800000;      /* return -Inf */
-
-       if (exp == 0) {
-               /* denormalized */
-               asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
-               mant <<= lz - 8;
-               exp = (-118 - lz) << 23;
-       } else {
-               mant |= 0x800000;
-               exp -= 127 << 23;
-       }
-
-       if (mant >= 0xb504f3) {                         /* 2^0.5 * 2^23 */
-               exp |= 0x400000;                        /* 0.5 * 2^23 */
-               asm("mulhwu %0,%1,%2" : "=r" (mant)
-                   : "r" (mant), "r" (0xb504f334));    /* 2^-0.5 * 2^32 */
-       }
-       if (mant >= 0x9837f0) {                         /* 2^0.25 * 2^23 */
-               exp |= 0x200000;                        /* 0.25 * 2^23 */
-               asm("mulhwu %0,%1,%2" : "=r" (mant)
-                   : "r" (mant), "r" (0xd744fccb));    /* 2^-0.25 * 2^32 */
-       }
-       if (mant >= 0x8b95c2) {                         /* 2^0.125 * 2^23 */
-               exp |= 0x100000;                        /* 0.125 * 2^23 */
-               asm("mulhwu %0,%1,%2" : "=r" (mant)
-                   : "r" (mant), "r" (0xeac0c6e8));    /* 2^-0.125 * 2^32 */
-       }
-       if (mant > 0x800000) {                          /* 1.0 * 2^23 */
-               /* calculate (mant - 1) * 1.381097463 */
-               /* 1.381097463 == 0.125 / (2^0.125 - 1) */
-               asm("mulhwu %0,%1,%2" : "=r" (frac)
-                   : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
-               exp += frac;
-       }
-       s = exp & 0x80000000;
-       if (exp != 0) {
-               if (s)
-                       exp = -exp;
-               asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
-               lz = 8 - lz;
-               if (lz > 0)
-                       exp >>= lz;
-               else if (lz < 0)
-                       exp <<= -lz;
-               s += ((lz + 126) << 23) + exp;
-       }
-       return s;
-}
-
-#define VSCR_SAT       1
-
-static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
-{
-       int exp, mant;
-
-       exp = (x >> 23) & 0xff;
-       mant = x & 0x7fffff;
-       if (exp == 255 && mant != 0)
-               return 0;               /* NaN -> 0 */
-       exp = exp - 127 + scale;
-       if (exp < 0)
-               return 0;               /* round towards zero */
-       if (exp >= 31) {
-               /* saturate, unless the result would be -2^31 */
-               if (x + (scale << 23) != 0xcf000000)
-                       *vscrp |= VSCR_SAT;
-               return (x & 0x80000000)? 0x80000000: 0x7fffffff;
-       }
-       mant |= 0x800000;
-       mant = (mant << 7) >> (30 - exp);
-       return (x & 0x80000000)? -mant: mant;
-}
-
-static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
-{
-       int exp;
-       unsigned int mant;
-
-       exp = (x >> 23) & 0xff;
-       mant = x & 0x7fffff;
-       if (exp == 255 && mant != 0)
-               return 0;               /* NaN -> 0 */
-       exp = exp - 127 + scale;
-       if (exp < 0)
-               return 0;               /* round towards zero */
-       if (x & 0x80000000) {
-               /* negative => saturate to 0 */
-               *vscrp |= VSCR_SAT;
-               return 0;
-       }
-       if (exp >= 32) {
-               /* saturate */
-               *vscrp |= VSCR_SAT;
-               return 0xffffffff;
-       }
-       mant |= 0x800000;
-       mant = (mant << 8) >> (31 - exp);
-       return mant;
-}
-
-/* Round to floating integer, towards 0 */
-static unsigned int rfiz(unsigned int x)
-{
-       int exp;
-
-       exp = ((x >> 23) & 0xff) - 127;
-       if (exp == 128 && (x & 0x7fffff) != 0)
-               return x | 0x400000;    /* NaN -> make it a QNaN */
-       if (exp >= 23)
-               return x;               /* it's an integer already (or Inf) */
-       if (exp < 0)
-               return x & 0x80000000;  /* |x| < 1.0 rounds to 0 */
-       return x & ~(0x7fffff >> exp);
-}
-
-/* Round to floating integer, towards +/- Inf */
-static unsigned int rfii(unsigned int x)
-{
-       int exp, mask;
-
-       exp = ((x >> 23) & 0xff) - 127;
-       if (exp == 128 && (x & 0x7fffff) != 0)
-               return x | 0x400000;    /* NaN -> make it a QNaN */
-       if (exp >= 23)
-               return x;               /* it's an integer already (or Inf) */
-       if ((x & 0x7fffffff) == 0)
-               return x;               /* +/-0 -> +/-0 */
-       if (exp < 0)
-               /* 0 < |x| < 1.0 rounds to +/- 1.0 */
-               return (x & 0x80000000) | 0x3f800000;
-       mask = 0x7fffff >> exp;
-       /* mantissa overflows into exponent - that's OK,
-          it can't overflow into the sign bit */
-       return (x + mask) & ~mask;
-}
-
-/* Round to floating integer, to nearest */
-static unsigned int rfin(unsigned int x)
-{
-       int exp, half;
-
-       exp = ((x >> 23) & 0xff) - 127;
-       if (exp == 128 && (x & 0x7fffff) != 0)
-               return x | 0x400000;    /* NaN -> make it a QNaN */
-       if (exp >= 23)
-               return x;               /* it's an integer already (or Inf) */
-       if (exp < -1)
-               return x & 0x80000000;  /* |x| < 0.5 -> +/-0 */
-       if (exp == -1)
-               /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
-               return (x & 0x80000000) | 0x3f800000;
-       half = 0x400000 >> exp;
-       /* add 0.5 to the magnitude and chop off the fraction bits */
-       return (x + half) & ~(0x7fffff >> exp);
-}
-
-int
-emulate_altivec(struct pt_regs *regs)
-{
-       unsigned int instr, i;
-       unsigned int va, vb, vc, vd;
-       vector128 *vrs;
-
-       if (get_user(instr, (unsigned int __user *) regs->nip))
-               return -EFAULT;
-       if ((instr >> 26) != 4)
-               return -EINVAL;         /* not an altivec instruction */
-       vd = (instr >> 21) & 0x1f;
-       va = (instr >> 16) & 0x1f;
-       vb = (instr >> 11) & 0x1f;
-       vc = (instr >> 6) & 0x1f;
-
-       vrs = current->thread.vr;
-       switch (instr & 0x3f) {
-       case 10:
-               switch (vc) {
-               case 0: /* vaddfp */
-                       vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
-                       break;
-               case 1: /* vsubfp */
-                       vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
-                       break;
-               case 4: /* vrefp */
-                       vrefp(&vrs[vd], &vrs[vb]);
-                       break;
-               case 5: /* vrsqrtefp */
-                       vrsqrtefp(&vrs[vd], &vrs[vb]);
-                       break;
-               case 6: /* vexptefp */
-                       for (i = 0; i < 4; ++i)
-                               vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
-                       break;
-               case 7: /* vlogefp */
-                       for (i = 0; i < 4; ++i)
-                               vrs[vd].u[i] = elog2(vrs[vb].u[i]);
-                       break;
-               case 8:         /* vrfin */
-                       for (i = 0; i < 4; ++i)
-                               vrs[vd].u[i] = rfin(vrs[vb].u[i]);
-                       break;
-               case 9:         /* vrfiz */
-                       for (i = 0; i < 4; ++i)
-                               vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
-                       break;
-               case 10:        /* vrfip */
-                       for (i = 0; i < 4; ++i) {
-                               u32 x = vrs[vb].u[i];
-                               x = (x & 0x80000000)? rfiz(x): rfii(x);
-                               vrs[vd].u[i] = x;
-                       }
-                       break;
-               case 11:        /* vrfim */
-                       for (i = 0; i < 4; ++i) {
-                               u32 x = vrs[vb].u[i];
-                               x = (x & 0x80000000)? rfii(x): rfiz(x);
-                               vrs[vd].u[i] = x;
-                       }
-                       break;
-               case 14:        /* vctuxs */
-                       for (i = 0; i < 4; ++i)
-                               vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
-                                               &current->thread.vscr.u[3]);
-                       break;
-               case 15:        /* vctsxs */
-                       for (i = 0; i < 4; ++i)
-                               vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
-                                               &current->thread.vscr.u[3]);
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               break;
-       case 46:        /* vmaddfp */
-               vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
-               break;
-       case 47:        /* vnmsubfp */
-               vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
index 0306510..022f220 100644 (file)
@@ -1,3 +1,4 @@
+#include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
 
 OUTPUT_ARCH(powerpc:common64)
@@ -17,7 +18,7 @@ SECTIONS
        LOCK_TEXT
        KPROBES_TEXT
        *(.fixup)
-       . = ALIGN(4096);
+       . = ALIGN(PAGE_SIZE);
        _etext = .;
        }
 
@@ -43,7 +44,7 @@ SECTIONS
 
 
   /* will be freed after init */
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   __init_begin = .;
 
   .init.text : {
@@ -83,7 +84,7 @@ SECTIONS
 
   SECURITY_INIT
 
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   .init.ramfs : {
        __initramfs_start = .;
        *(.init.ramfs)
@@ -96,18 +97,22 @@ SECTIONS
        __per_cpu_end = .;
        }
 
+  . = ALIGN(PAGE_SIZE);
   . = ALIGN(16384);
   __init_end = .;
   /* freed after init ends here */
 
 
   /* Read/write sections */
+  . = ALIGN(PAGE_SIZE);
   . = ALIGN(16384);
+  _sdata = .;
   /* The initial task and kernel stack */
   .data.init_task : {
        *(.data.init_task)
        }
 
+  . = ALIGN(PAGE_SIZE);
   .data.page_aligned : {
        *(.data.page_aligned)
        }
@@ -129,18 +134,18 @@ SECTIONS
        __toc_start = .;
        *(.got)
        *(.toc)
-       . = ALIGN(4096);
+       . = ALIGN(PAGE_SIZE);
        _edata = .;
        }
 
 
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   .bss : {
        __bss_start = .;
        *(.bss)
        __bss_stop = .;
        }
 
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   _end = . ;
 }
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
deleted file mode 100644 (file)
index 3695d00..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the linux ppc-specific parts of the memory manager.
-#
-
-EXTRA_CFLAGS += -mno-minimal-toc
-
-obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
-       slb_low.o slb.o stab.o mmap.o
-obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
deleted file mode 100644 (file)
index be64b15..0000000
+++ /dev/null
@@ -1,869 +0,0 @@
-/*
- *  PowerPC version 
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
- *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
- *  Derived from "arch/i386/mm/init.c"
- *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
- *
- *  Dave Engebretsen <engebret@us.ibm.com>
- *      Rework for PPC64 port.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
-
-#include <asm/pgalloc.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/rtas.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/uaccess.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/tlb.h>
-#include <asm/eeh.h>
-#include <asm/processor.h>
-#include <asm/mmzone.h>
-#include <asm/cputable.h>
-#include <asm/ppcdebug.h>
-#include <asm/sections.h>
-#include <asm/system.h>
-#include <asm/iommu.h>
-#include <asm/abs_addr.h>
-#include <asm/vdso.h>
-#include <asm/imalloc.h>
-
-#if PGTABLE_RANGE > USER_VSID_RANGE
-#warning Limited user VSID range means pagetable space is wasted
-#endif
-
-#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
-#warning TASK_SIZE is smaller than it needs to be.
-#endif
-
-int mem_init_done;
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
-
-extern pgd_t swapper_pg_dir[];
-extern struct task_struct *current_set[NR_CPUS];
-
-unsigned long klimit = (unsigned long)_end;
-
-unsigned long _SDR1=0;
-unsigned long _ASR=0;
-
-/* max amount of RAM to use */
-unsigned long __max_memory;
-
-/* info on what we think the IO hole is */
-unsigned long  io_hole_start;
-unsigned long  io_hole_size;
-
-void show_mem(void)
-{
-       unsigned long total = 0, reserved = 0;
-       unsigned long shared = 0, cached = 0;
-       struct page *page;
-       pg_data_t *pgdat;
-       unsigned long i;
-
-       printk("Mem-info:\n");
-       show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       for_each_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       page = pgdat_page_nr(pgdat, i);
-                       total++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-       }
-       printk("%ld pages of RAM\n", total);
-       printk("%ld reserved pages\n", reserved);
-       printk("%ld pages shared\n", shared);
-       printk("%ld pages swap cached\n", cached);
-}
-
-#ifdef CONFIG_PPC_ISERIES
-
-void __iomem *ioremap(unsigned long addr, unsigned long size)
-{
-       return (void __iomem *)addr;
-}
-
-extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
-                      unsigned long flags)
-{
-       return (void __iomem *)addr;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-       return;
-}
-
-#else
-
-/*
- * map_io_page currently only called by __ioremap
- * map_io_page adds an entry to the ioremap page table
- * and adds an entry to the HPT, possibly bolting it
- */
-static int map_io_page(unsigned long ea, unsigned long pa, int flags)
-{
-       pgd_t *pgdp;
-       pud_t *pudp;
-       pmd_t *pmdp;
-       pte_t *ptep;
-       unsigned long vsid;
-
-       if (mem_init_done) {
-               spin_lock(&init_mm.page_table_lock);
-               pgdp = pgd_offset_k(ea);
-               pudp = pud_alloc(&init_mm, pgdp, ea);
-               if (!pudp)
-                       return -ENOMEM;
-               pmdp = pmd_alloc(&init_mm, pudp, ea);
-               if (!pmdp)
-                       return -ENOMEM;
-               ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
-               if (!ptep)
-                       return -ENOMEM;
-               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-                                                         __pgprot(flags)));
-               spin_unlock(&init_mm.page_table_lock);
-       } else {
-               unsigned long va, vpn, hash, hpteg;
-
-               /*
-                * If the mm subsystem is not fully up, we cannot create a
-                * linux page table entry for this mapping.  Simply bolt an
-                * entry in the hardware page table.
-                */
-               vsid = get_kernel_vsid(ea);
-               va = (vsid << 28) | (ea & 0xFFFFFFF);
-               vpn = va >> PAGE_SHIFT;
-
-               hash = hpt_hash(vpn, 0);
-
-               hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
-
-               /* Panic if a pte grpup is full */
-               if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
-                                      HPTE_V_BOLTED,
-                                      _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
-                   == -1) {
-                       panic("map_io_page: could not insert mapping");
-               }
-       }
-       return 0;
-}
-
-
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
-                           unsigned long ea, unsigned long size,
-                           unsigned long flags)
-{
-       unsigned long i;
-
-       if ((flags & _PAGE_PRESENT) == 0)
-               flags |= pgprot_val(PAGE_KERNEL);
-
-       for (i = 0; i < size; i += PAGE_SIZE)
-               if (map_io_page(ea+i, pa+i, flags))
-                       return NULL;
-
-       return (void __iomem *) (ea + (addr & ~PAGE_MASK));
-}
-
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-       return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
-                        unsigned long flags)
-{
-       unsigned long pa, ea;
-       void __iomem *ret;
-
-       /*
-        * Choose an address to map it to.
-        * Once the imalloc system is running, we use it.
-        * Before that, we map using addresses going
-        * up from ioremap_bot.  imalloc will use
-        * the addresses from ioremap_bot through
-        * IMALLOC_END
-        * 
-        */
-       pa = addr & PAGE_MASK;
-       size = PAGE_ALIGN(addr + size) - pa;
-
-       if (size == 0)
-               return NULL;
-
-       if (mem_init_done) {
-               struct vm_struct *area;
-               area = im_get_free_area(size);
-               if (area == NULL)
-                       return NULL;
-               ea = (unsigned long)(area->addr);
-               ret = __ioremap_com(addr, pa, ea, size, flags);
-               if (!ret)
-                       im_free(area->addr);
-       } else {
-               ea = ioremap_bot;
-               ret = __ioremap_com(addr, pa, ea, size, flags);
-               if (ret)
-                       ioremap_bot += size;
-       }
-       return ret;
-}
-
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
-                      unsigned long size, unsigned long flags)
-{
-       struct vm_struct *area;
-       void __iomem *ret;
-       
-       /* For now, require page-aligned values for pa, ea, and size */
-       if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
-           !IS_PAGE_ALIGNED(size)) {
-               printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
-               return 1;
-       }
-       
-       if (!mem_init_done) {
-               /* Two things to consider in this case:
-                * 1) No records will be kept (imalloc, etc) that the region
-                *    has been remapped
-                * 2) It won't be easy to iounmap() the region later (because
-                *    of 1)
-                */
-               ;
-       } else {
-               area = im_get_area(ea, size,
-                       IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
-               if (area == NULL) {
-                       /* Expected when PHB-dlpar is in play */
-                       return 1;
-               }
-               if (ea != (unsigned long) area->addr) {
-                       printk(KERN_ERR "unexpected addr return from "
-                              "im_get_area\n");
-                       return 1;
-               }
-       }
-       
-       ret = __ioremap_com(pa, pa, ea, size, flags);
-       if (ret == NULL) {
-               printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
-               return 1;
-       }
-       if (ret != (void *) ea) {
-               printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
-               return 1;
-       }
-
-       return 0;
-}
-
-/*  
- * Unmap an IO region and remove it from imalloc'd list.
- * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
- *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
- */
-void iounmap(volatile void __iomem *token)
-{
-       void *addr;
-
-       if (!mem_init_done)
-               return;
-       
-       addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-       im_free(addr);
-}
-
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
-       struct vm_struct *area;
-
-       /* Check whether subsets of this region exist */
-       area = im_get_area(addr, size, IM_REGION_SUPERSET);
-       if (area == NULL)
-               return 1;
-
-       while (area) {
-               iounmap((void __iomem *) area->addr);
-               area = im_get_area(addr, size,
-                               IM_REGION_SUPERSET);
-       }
-
-       return 0;
-}
-
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
-       struct vm_struct *area;
-       unsigned long addr;
-       int rc;
-       
-       addr = (unsigned long __force) start & PAGE_MASK;
-
-       /* Verify that the region either exists or is a subset of an existing
-        * region.  In the latter case, split the parent region to create 
-        * the exact region 
-        */
-       area = im_get_area(addr, size, 
-                           IM_REGION_EXISTS | IM_REGION_SUBSET);
-       if (area == NULL) {
-               /* Determine whether subset regions exist.  If so, unmap */
-               rc = iounmap_subset_regions(addr, size);
-               if (rc) {
-                       printk(KERN_ERR
-                              "%s() cannot unmap nonexistent range 0x%lx\n",
-                               __FUNCTION__, addr);
-                       return 1;
-               }
-       } else {
-               iounmap((void __iomem *) area->addr);
-       }
-       /*
-        * FIXME! This can't be right:
-       iounmap(area->addr);
-        * Maybe it should be "iounmap(area);"
-        */
-       return 0;
-}
-
-#endif
-
-EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
-
-void free_initmem(void)
-{
-       unsigned long addr;
-
-       addr = (unsigned long)__init_begin;
-       for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
-               memset((void *)addr, 0xcc, PAGE_SIZE);
-               ClearPageReserved(virt_to_page(addr));
-               set_page_count(virt_to_page(addr), 1);
-               free_page(addr);
-               totalram_pages++;
-       }
-       printk ("Freeing unused kernel memory: %luk freed\n",
-               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       if (start < end)
-               printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-       for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(start));
-               set_page_count(virt_to_page(start), 1);
-               free_page(start);
-               totalram_pages++;
-       }
-}
-#endif
-
-static DEFINE_SPINLOCK(mmu_context_lock);
-static DEFINE_IDR(mmu_context_idr);
-
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-       int index;
-       int err;
-
-again:
-       if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
-               return -ENOMEM;
-
-       spin_lock(&mmu_context_lock);
-       err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
-       spin_unlock(&mmu_context_lock);
-
-       if (err == -EAGAIN)
-               goto again;
-       else if (err)
-               return err;
-
-       if (index > MAX_CONTEXT) {
-               idr_remove(&mmu_context_idr, index);
-               return -ENOMEM;
-       }
-
-       mm->context.id = index;
-
-       return 0;
-}
-
-void destroy_context(struct mm_struct *mm)
-{
-       spin_lock(&mmu_context_lock);
-       idr_remove(&mmu_context_idr, mm->context.id);
-       spin_unlock(&mmu_context_lock);
-
-       mm->context.id = NO_CONTEXT;
-}
-
-/*
- * Do very early mm setup.
- */
-void __init mm_init_ppc64(void)
-{
-#ifndef CONFIG_PPC_ISERIES
-       unsigned long i;
-#endif
-
-       ppc64_boot_msg(0x100, "MM Init");
-
-       /* This is the story of the IO hole... please, keep seated,
-        * unfortunately, we are out of oxygen masks at the moment.
-        * So we need some rough way to tell where your big IO hole
-        * is. On pmac, it's between 2G and 4G, on POWER3, it's around
-        * that area as well, on POWER4 we don't have one, etc...
-        * We need that as a "hint" when sizing the TCE table on POWER3
-        * So far, the simplest way that seem work well enough for us it
-        * to just assume that the first discontinuity in our physical
-        * RAM layout is the IO hole. That may not be correct in the future
-        * (and isn't on iSeries but then we don't care ;)
-        */
-
-#ifndef CONFIG_PPC_ISERIES
-       for (i = 1; i < lmb.memory.cnt; i++) {
-               unsigned long base, prevbase, prevsize;
-
-               prevbase = lmb.memory.region[i-1].base;
-               prevsize = lmb.memory.region[i-1].size;
-               base = lmb.memory.region[i].base;
-               if (base > (prevbase + prevsize)) {
-                       io_hole_start = prevbase + prevsize;
-                       io_hole_size = base  - (prevbase + prevsize);
-                       break;
-               }
-       }
-#endif /* CONFIG_PPC_ISERIES */
-       if (io_hole_start)
-               printk("IO Hole assumed to be %lx -> %lx\n",
-                      io_hole_start, io_hole_start + io_hole_size - 1);
-
-       ppc64_boot_msg(0x100, "MM Init Done");
-}
-
-/*
- * This is called by /dev/mem to know if a given address has to
- * be mapped non-cacheable or not
- */
-int page_is_ram(unsigned long pfn)
-{
-       int i;
-       unsigned long paddr = (pfn << PAGE_SHIFT);
-
-       for (i=0; i < lmb.memory.cnt; i++) {
-               unsigned long base;
-
-               base = lmb.memory.region[i].base;
-
-               if ((paddr >= base) &&
-                       (paddr < (base + lmb.memory.region[i].size))) {
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(page_is_ram);
-
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.
- */
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init do_init_bootmem(void)
-{
-       unsigned long i;
-       unsigned long start, bootmap_pages;
-       unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
-       int boot_mapsize;
-
-       /*
-        * Find an area to use for the bootmem bitmap.  Calculate the size of
-        * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
-        * Add 1 additional page in case the address isn't page-aligned.
-        */
-       bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-       start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
-       BUG_ON(!start);
-
-       boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
-
-       max_pfn = max_low_pfn;
-
-       /* Add all physical memory to the bootmem map, mark each area
-        * present.
-        */
-       for (i=0; i < lmb.memory.cnt; i++)
-               free_bootmem(lmb.memory.region[i].base,
-                            lmb_size_bytes(&lmb.memory, i));
-
-       /* reserve the sections we're already using */
-       for (i=0; i < lmb.reserved.cnt; i++)
-               reserve_bootmem(lmb.reserved.region[i].base,
-                               lmb_size_bytes(&lmb.reserved, i));
-
-       for (i=0; i < lmb.memory.cnt; i++)
-               memory_present(0, lmb_start_pfn(&lmb.memory, i),
-                              lmb_end_pfn(&lmb.memory, i));
-}
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-void __init paging_init(void)
-{
-       unsigned long zones_size[MAX_NR_ZONES];
-       unsigned long zholes_size[MAX_NR_ZONES];
-       unsigned long total_ram = lmb_phys_mem_size();
-       unsigned long top_of_ram = lmb_end_of_DRAM();
-
-       printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
-              top_of_ram, total_ram);
-       printk(KERN_INFO "Memory hole size: %ldMB\n",
-              (top_of_ram - total_ram) >> 20);
-       /*
-        * All pages are DMA-able so we put them all in the DMA zone.
-        */
-       memset(zones_size, 0, sizeof(zones_size));
-       memset(zholes_size, 0, sizeof(zholes_size));
-
-       zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
-       zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-
-       free_area_init_node(0, NODE_DATA(0), zones_size,
-                           __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-}
-#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
-
-static struct kcore_list kcore_vmem;
-
-static int __init setup_kcore(void)
-{
-       int i;
-
-       for (i=0; i < lmb.memory.cnt; i++) {
-               unsigned long base, size;
-               struct kcore_list *kcore_mem;
-
-               base = lmb.memory.region[i].base;
-               size = lmb.memory.region[i].size;
-
-               /* GFP_ATOMIC to avoid might_sleep warnings during boot */
-               kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
-               if (!kcore_mem)
-                       panic("mem_init: kmalloc failed\n");
-
-               kclist_add(kcore_mem, __va(base), size);
-       }
-
-       kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
-
-       return 0;
-}
-module_init(setup_kcore);
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-       int nid;
-#endif
-       pg_data_t *pgdat;
-       unsigned long i;
-       struct page *page;
-       unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
-
-       num_physpages = max_low_pfn;    /* RAM is assumed contiguous */
-       high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-        for_each_online_node(nid) {
-               if (NODE_DATA(nid)->node_spanned_pages != 0) {
-                       printk("freeing bootmem node %x\n", nid);
-                       totalram_pages +=
-                               free_all_bootmem_node(NODE_DATA(nid));
-               }
-       }
-#else
-       max_mapnr = num_physpages;
-       totalram_pages += free_all_bootmem();
-#endif
-
-       for_each_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       page = pgdat_page_nr(pgdat, i);
-                       if (PageReserved(page))
-                               reservedpages++;
-               }
-       }
-
-       codesize = (unsigned long)&_etext - (unsigned long)&_stext;
-       initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
-       datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
-       bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
-
-       printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
-              "%luk reserved, %luk data, %luk bss, %luk init)\n",
-               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
-               num_physpages << (PAGE_SHIFT-10),
-               codesize >> 10,
-               reservedpages << (PAGE_SHIFT-10),
-               datasize >> 10,
-               bsssize >> 10,
-               initsize >> 10);
-
-       mem_init_done = 1;
-
-       /* Initialize the vDSO */
-       vdso_init();
-}
-
-/*
- * This is called when a page has been modified by the kernel.
- * It just marks the page as not i-cache clean.  We do the i-cache
- * flush later when the page is given to a user process, if necessary.
- */
-void flush_dcache_page(struct page *page)
-{
-       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-               return;
-       /* avoid an atomic op if possible */
-       if (test_bit(PG_arch_1, &page->flags))
-               clear_bit(PG_arch_1, &page->flags);
-}
-EXPORT_SYMBOL(flush_dcache_page);
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
-       clear_page(page);
-
-       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-               return;
-       /*
-        * We shouldnt have to do this, but some versions of glibc
-        * require it (ld.so assumes zero filled pages are icache clean)
-        * - Anton
-        */
-
-       /* avoid an atomic op if possible */
-       if (test_bit(PG_arch_1, &pg->flags))
-               clear_bit(PG_arch_1, &pg->flags);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-                   struct page *pg)
-{
-       copy_page(vto, vfrom);
-
-       /*
-        * We should be able to use the following optimisation, however
-        * there are two problems.
-        * Firstly a bug in some versions of binutils meant PLT sections
-        * were not marked executable.
-        * Secondly the first word in the GOT section is blrl, used
-        * to establish the GOT address. Until recently the GOT was
-        * not marked executable.
-        * - Anton
-        */
-#if 0
-       if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
-               return;
-#endif
-
-       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-               return;
-
-       /* avoid an atomic op if possible */
-       if (test_bit(PG_arch_1, &pg->flags))
-               clear_bit(PG_arch_1, &pg->flags);
-}
-
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-                            unsigned long addr, int len)
-{
-       unsigned long maddr;
-
-       maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
-       flush_icache_range(maddr, maddr + len);
-}
-EXPORT_SYMBOL(flush_icache_user_range);
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a PTE in the linux page tables.
- * We use it to preload an HPTE into the hash table corresponding to
- * the updated linux PTE.
- * 
- * This must always be called with the mm->page_table_lock held
- */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
-                     pte_t pte)
-{
-       unsigned long vsid;
-       void *pgdir;
-       pte_t *ptep;
-       int local = 0;
-       cpumask_t tmp;
-       unsigned long flags;
-
-       /* handle i-cache coherency */
-       if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-           !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
-               unsigned long pfn = pte_pfn(pte);
-               if (pfn_valid(pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-                       if (!PageReserved(page)
-                           && !test_bit(PG_arch_1, &page->flags)) {
-                               __flush_dcache_icache(page_address(page));
-                               set_bit(PG_arch_1, &page->flags);
-                       }
-               }
-       }
-
-       /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-       if (!pte_young(pte))
-               return;
-
-       pgdir = vma->vm_mm->pgd;
-       if (pgdir == NULL)
-               return;
-
-       ptep = find_linux_pte(pgdir, ea);
-       if (!ptep)
-               return;
-
-       vsid = get_vsid(vma->vm_mm->context.id, ea);
-
-       local_irq_save(flags);
-       tmp = cpumask_of_cpu(smp_processor_id());
-       if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
-               local = 1;
-
-       __hash_page(ea, 0, vsid, ptep, 0x300, local);
-       local_irq_restore(flags);
-}
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
-       void __iomem *virt_addr;
-               
-       if (phbs_io_bot >= IMALLOC_BASE) 
-               panic("reserve_phb_iospace(): phb io space overflow\n");
-                       
-       virt_addr = (void __iomem *) phbs_io_bot;
-       phbs_io_bot += size;
-
-       return virt_addr;
-}
-
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
-{
-       memset(addr, 0, kmem_cache_size(cache));
-}
-
-static const int pgtable_cache_size[2] = {
-       PTE_TABLE_SIZE, PMD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-       "pgd_pte_cache", "pud_pmd_cache",
-};
-
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
-
-void pgtable_cache_init(void)
-{
-       int i;
-
-       BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
-       BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
-       BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
-       BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
-
-       for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
-               int size = pgtable_cache_size[i];
-               const char *name = pgtable_cache_name[i];
-
-               pgtable_cache[i] = kmem_cache_create(name,
-                                                    size, size,
-                                                    SLAB_HWCACHE_ALIGN
-                                                    | SLAB_MUST_HWCACHE_ALIGN,
-                                                    zero_ctor,
-                                                    NULL);
-               if (! pgtable_cache[i])
-                       panic("pgtable_cache_init(): could not create %s!\n",
-                             name);
-       }
-}
-
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
-                             unsigned long size, pgprot_t vma_prot)
-{
-       if (ppc_md.phys_mem_access_prot)
-               return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
-
-       if (!page_is_ram(addr >> PAGE_SHIFT))
-               vma_prot = __pgprot(pgprot_val(vma_prot)
-                                   | _PAGE_GUARDED | _PAGE_NO_CACHE);
-       return vma_prot;
-}
-EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/ppc64/oprofile/Kconfig b/arch/ppc64/oprofile/Kconfig
deleted file mode 100644 (file)
index 5ade198..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-
-menu "Profiling support"
-       depends on EXPERIMENTAL
-
-config PROFILING
-       bool "Profiling support (EXPERIMENTAL)"
-       help
-         Say Y here to enable the extended profiling support mechanisms used
-         by profilers such as OProfile.
-         
-
-config OPROFILE
-       tristate "OProfile system profiling (EXPERIMENTAL)"
-       depends on PROFILING
-       help
-         OProfile is a profiling system capable of profiling the
-         whole system, include the kernel, kernel modules, libraries,
-         and applications.
-
-         If unsure, say N.
-
-endmenu
-
diff --git a/arch/ppc64/oprofile/Makefile b/arch/ppc64/oprofile/Makefile
deleted file mode 100644 (file)
index 162dbf0..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
-               oprof.o cpu_buffer.o buffer_sync.o \
-               event_buffer.o oprofile_files.o \
-               oprofilefs.o oprofile_stats.o \
-               timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
index 87d1f8a..d8c3d8e 100644 (file)
@@ -81,7 +81,7 @@ typedef struct pmac_ide_hwif {
        
 } pmac_ide_hwif_t;
 
-static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata;
+static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
 static int pmac_ide_count;
 
 enum {
@@ -242,7 +242,7 @@ struct mdma_timings_t {
        int     cycleTime;
 };
 
-struct mdma_timings_t mdma_timings_33[] __pmacdata =
+struct mdma_timings_t mdma_timings_33[] =
 {
     { 240, 240, 480 },
     { 180, 180, 360 },
@@ -255,7 +255,7 @@ struct mdma_timings_t mdma_timings_33[] __pmacdata =
     {   0,   0,   0 }
 };
 
-struct mdma_timings_t mdma_timings_33k[] __pmacdata =
+struct mdma_timings_t mdma_timings_33k[] =
 {
     { 240, 240, 480 },
     { 180, 180, 360 },
@@ -268,7 +268,7 @@ struct mdma_timings_t mdma_timings_33k[] __pmacdata =
     {   0,   0,   0 }
 };
 
-struct mdma_timings_t mdma_timings_66[] __pmacdata =
+struct mdma_timings_t mdma_timings_66[] =
 {
     { 240, 240, 480 },
     { 180, 180, 360 },
@@ -286,7 +286,7 @@ struct {
        int     addrSetup; /* ??? */
        int     rdy2pause;
        int     wrDataSetup;
-} kl66_udma_timings[] __pmacdata =
+} kl66_udma_timings[] =
 {
     {   0, 180,  120 },        /* Mode 0 */
     {   0, 150,  90 }, /*      1 */
@@ -301,7 +301,7 @@ struct kauai_timing {
        u32     timing_reg;
 };
 
-static struct kauai_timing     kauai_pio_timings[] __pmacdata =
+static struct kauai_timing     kauai_pio_timings[] =
 {
        { 930   , 0x08000fff },
        { 600   , 0x08000a92 },
@@ -316,7 +316,7 @@ static struct kauai_timing  kauai_pio_timings[] __pmacdata =
        { 120   , 0x04000148 }
 };
 
-static struct kauai_timing     kauai_mdma_timings[] __pmacdata =
+static struct kauai_timing     kauai_mdma_timings[] =
 {
        { 1260  , 0x00fff000 },
        { 480   , 0x00618000 },
@@ -330,7 +330,7 @@ static struct kauai_timing  kauai_mdma_timings[] __pmacdata =
        { 0     , 0 },
 };
 
-static struct kauai_timing     kauai_udma_timings[] __pmacdata =
+static struct kauai_timing     kauai_udma_timings[] =
 {
        { 120   , 0x000070c0 },
        { 90    , 0x00005d80 },
@@ -341,7 +341,7 @@ static struct kauai_timing  kauai_udma_timings[] __pmacdata =
        { 0     , 0 },
 };
 
-static struct kauai_timing     shasta_pio_timings[] __pmacdata =
+static struct kauai_timing     shasta_pio_timings[] =
 {
        { 930   , 0x08000fff },
        { 600   , 0x0A000c97 },
@@ -356,7 +356,7 @@ static struct kauai_timing  shasta_pio_timings[] __pmacdata =
        { 120   , 0x0400010a }
 };
 
-static struct kauai_timing     shasta_mdma_timings[] __pmacdata =
+static struct kauai_timing     shasta_mdma_timings[] =
 {
        { 1260  , 0x00fff000 },
        { 480   , 0x00820800 },
@@ -370,7 +370,7 @@ static struct kauai_timing  shasta_mdma_timings[] __pmacdata =
        { 0     , 0 },
 };
 
-static struct kauai_timing     shasta_udma133_timings[] __pmacdata =
+static struct kauai_timing     shasta_udma133_timings[] =
 {
        { 120   , 0x00035901, },
        { 90    , 0x000348b1, },
@@ -522,7 +522,7 @@ pmu_hd_blink_init(void)
  * N.B. this can't be an initfunc, because the media-bay task can
  * call ide_[un]register at any time.
  */
-void __pmac
+void
 pmac_ide_init_hwif_ports(hw_regs_t *hw,
                              unsigned long data_port, unsigned long ctrl_port,
                              int *irq)
@@ -559,7 +559,7 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
  * timing register when selecting that unit. This version is for
  * ASICs with a single timing register
  */
-static void __pmac
+static void
 pmac_ide_selectproc(ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -579,7 +579,7 @@ pmac_ide_selectproc(ide_drive_t *drive)
  * timing register when selecting that unit. This version is for
  * ASICs with a dual timing register (Kauai)
  */
-static void __pmac
+static void
 pmac_ide_kauai_selectproc(ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -600,7 +600,7 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
 /*
  * Force an update of controller timing values for a given drive
  */
-static void __pmac
+static void
 pmac_ide_do_update_timings(ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -633,7 +633,7 @@ pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
  * to sort that out sooner or later and see if I can finally get the
  * common version to work properly in all cases
  */
-static int __pmac
+static int
 pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
 {
        ide_hwif_t *hwif = HWIF(drive);
@@ -710,7 +710,7 @@ out:
 /*
  * Old tuning functions (called on hdparm -p), sets up drive PIO timings
  */
-static void __pmac
+static void
 pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
 {
        ide_pio_data_t d;
@@ -801,7 +801,7 @@ pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
 /*
  * Calculate KeyLargo ATA/66 UDMA timings
  */
-static int __pmac
+static int
 set_timings_udma_ata4(u32 *timings, u8 speed)
 {
        unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
@@ -829,7 +829,7 @@ set_timings_udma_ata4(u32 *timings, u8 speed)
 /*
  * Calculate Kauai ATA/100 UDMA timings
  */
-static int __pmac
+static int
 set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
 {
        struct ide_timing *t = ide_timing_find_mode(speed);
@@ -849,7 +849,7 @@ set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
 /*
  * Calculate Shasta ATA/133 UDMA timings
  */
-static int __pmac
+static int
 set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
 {
        struct ide_timing *t = ide_timing_find_mode(speed);
@@ -869,7 +869,7 @@ set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
 /*
  * Calculate MDMA timings for all cells
  */
-static int __pmac
+static int
 set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
                        u8 speed, int drive_cycle_time)
 {
@@ -1014,7 +1014,7 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
  * our dedicated function is more precise as it uses the drive provided
  * cycle time value. We should probably fix this one to deal with that too...
  */
-static int __pmac
+static int
 pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
 {
        int unit = (drive->select.b.unit & 0x01);
@@ -1092,7 +1092,7 @@ pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
  * Blast some well known "safe" values to the timing registers at init or
  * wakeup from sleep time, before we do real calculation
  */
-static void __pmac
+static void
 sanitize_timings(pmac_ide_hwif_t *pmif)
 {
        unsigned int value, value2 = 0;
@@ -1123,13 +1123,13 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
        pmif->timings[2] = pmif->timings[3] = value2;
 }
 
-unsigned long __pmac
+unsigned long
 pmac_ide_get_base(int index)
 {
        return pmac_ide[index].regbase;
 }
 
-int __pmac
+int
 pmac_ide_check_base(unsigned long base)
 {
        int ix;
@@ -1140,7 +1140,7 @@ pmac_ide_check_base(unsigned long base)
        return -1;
 }
 
-int __pmac
+int
 pmac_ide_get_irq(unsigned long base)
 {
        int ix;
@@ -1151,7 +1151,7 @@ pmac_ide_get_irq(unsigned long base)
        return 0;
 }
 
-static int ide_majors[]  __pmacdata = { 3, 22, 33, 34, 56, 57 };
+static int ide_majors[] = { 3, 22, 33, 34, 56, 57 };
 
 dev_t __init
 pmac_find_ide_boot(char *bootdevice, int n)
@@ -1701,7 +1701,7 @@ pmac_ide_probe(void)
  * pmac_ide_build_dmatable builds the DBDMA command list
  * for a transfer and sets the DBDMA channel to point to it.
  */
-static int __pmac
+static int
 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 {
        struct dbdma_cmd *table;
@@ -1785,7 +1785,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 }
 
 /* Teardown mappings after DMA has completed.  */
-static void __pmac
+static void
 pmac_ide_destroy_dmatable (ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
@@ -1802,7 +1802,7 @@ pmac_ide_destroy_dmatable (ide_drive_t *drive)
 /*
  * Pick up best MDMA timing for the drive and apply it
  */
-static int __pmac
+static int
 pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
 {
        ide_hwif_t *hwif = HWIF(drive);
@@ -1859,7 +1859,7 @@ pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
 /*
  * Pick up best UDMA timing for the drive and apply it
  */
-static int __pmac
+static int
 pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
 {
        ide_hwif_t *hwif = HWIF(drive);
@@ -1915,7 +1915,7 @@ pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
  * Check what is the best DMA timing setting for the drive and
  * call appropriate functions to apply it.
  */
-static int __pmac
+static int
 pmac_ide_dma_check(ide_drive_t *drive)
 {
        struct hd_driveid *id = drive->id;
@@ -1967,7 +1967,7 @@ pmac_ide_dma_check(ide_drive_t *drive)
  * Prepare a DMA transfer. We build the DMA table, adjust the timings for
  * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
  */
-static int __pmac
+static int
 pmac_ide_dma_setup(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
@@ -1997,7 +1997,7 @@ pmac_ide_dma_setup(ide_drive_t *drive)
        return 0;
 }
 
-static void __pmac
+static void
 pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 {
        /* issue cmd to drive */
@@ -2008,7 +2008,7 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
  * Kick the DMA controller into life after the DMA command has been issued
  * to the drive.
  */
-static void __pmac
+static void
 pmac_ide_dma_start(ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2024,7 +2024,7 @@ pmac_ide_dma_start(ide_drive_t *drive)
 /*
  * After a DMA transfer, make sure the controller is stopped
  */
-static int __pmac
+static int
 pmac_ide_dma_end (ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2052,7 +2052,7 @@ pmac_ide_dma_end (ide_drive_t *drive)
  * that's not implemented yet), on the other hand, we don't have shared interrupts
  * so it's not really a problem
  */
-static int __pmac
+static int
 pmac_ide_dma_test_irq (ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2108,19 +2108,19 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
        return 1;
 }
 
-static int __pmac
+static int
 pmac_ide_dma_host_off (ide_drive_t *drive)
 {
        return 0;
 }
 
-static int __pmac
+static int
 pmac_ide_dma_host_on (ide_drive_t *drive)
 {
        return 0;
 }
 
-static int __pmac
+static int
 pmac_ide_dma_lostirq (ide_drive_t *drive)
 {
        pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
index 5e0811d..2b8a6e8 100644 (file)
@@ -27,7 +27,7 @@ static volatile unsigned char __iomem *anslcd_ptr;
 
 #undef DEBUG
 
-static void __pmac
+static void
 anslcd_write_byte_ctrl ( unsigned char c )
 {
 #ifdef DEBUG
@@ -43,14 +43,14 @@ anslcd_write_byte_ctrl ( unsigned char c )
        }
 }
 
-static void __pmac
+static void
 anslcd_write_byte_data ( unsigned char c )
 {
        out_8(anslcd_ptr + ANSLCD_DATA_IX, c);
        udelay(anslcd_short_delay);
 }
 
-static ssize_t __pmac
+static ssize_t
 anslcd_write( struct file * file, const char __user * buf, 
                                size_t count, loff_t *ppos )
 {
@@ -73,7 +73,7 @@ anslcd_write( struct file * file, const char __user * buf,
        return p - buf;
 }
 
-static int __pmac
+static int
 anslcd_ioctl( struct inode * inode, struct file * file,
                                unsigned int cmd, unsigned long arg )
 {
@@ -115,7 +115,7 @@ anslcd_ioctl( struct inode * inode, struct file * file,
        }
 }
 
-static int __pmac
+static int
 anslcd_open( struct inode * inode, struct file * file )
 {
        return 0;
index c0712a1..b856bb6 100644 (file)
@@ -167,19 +167,19 @@ enum {
  * Functions for polling content of media bay
  */
  
-static u8 __pmac
+static u8
 ohare_mb_content(struct media_bay_info *bay)
 {
        return (MB_IN32(bay, OHARE_MBCR) >> 12) & 7;
 }
 
-static u8 __pmac
+static u8
 heathrow_mb_content(struct media_bay_info *bay)
 {
        return (MB_IN32(bay, HEATHROW_MBCR) >> 12) & 7;
 }
 
-static u8 __pmac
+static u8
 keylargo_mb_content(struct media_bay_info *bay)
 {
        int new_gpio;
@@ -205,7 +205,7 @@ keylargo_mb_content(struct media_bay_info *bay)
  * into reset state as well
  */
 
-static void __pmac
+static void
 ohare_mb_power(struct media_bay_info* bay, int on_off)
 {
        if (on_off) {
@@ -224,7 +224,7 @@ ohare_mb_power(struct media_bay_info* bay, int on_off)
        MB_BIC(bay, OHARE_MBCR, 0x00000F00);
 }
 
-static void __pmac
+static void
 heathrow_mb_power(struct media_bay_info* bay, int on_off)
 {
        if (on_off) {
@@ -243,7 +243,7 @@ heathrow_mb_power(struct media_bay_info* bay, int on_off)
        MB_BIC(bay, HEATHROW_MBCR, 0x00000F00);
 }
 
-static void __pmac
+static void
 keylargo_mb_power(struct media_bay_info* bay, int on_off)
 {
        if (on_off) {
@@ -267,7 +267,7 @@ keylargo_mb_power(struct media_bay_info* bay, int on_off)
  * enable the related busses
  */
 
-static int __pmac
+static int
 ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
 {
        switch(device_id) {
@@ -287,7 +287,7 @@ ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
        return -ENODEV;
 }
 
-static int __pmac
+static int
 heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
 {
        switch(device_id) {
@@ -307,7 +307,7 @@ heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
        return -ENODEV;
 }
 
-static int __pmac
+static int
 keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
 {
        switch(device_id) {
@@ -330,43 +330,43 @@ keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
  * Functions for tweaking resets
  */
 
-static void __pmac
+static void
 ohare_mb_un_reset(struct media_bay_info* bay)
 {
        MB_BIS(bay, OHARE_FCR, OH_BAY_RESET_N);
 }
 
-static void __pmac keylargo_mb_init(struct media_bay_info *bay)
+static void keylargo_mb_init(struct media_bay_info *bay)
 {
        MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_ENABLE);
 }
 
-static void __pmac heathrow_mb_un_reset(struct media_bay_info* bay)
+static void heathrow_mb_un_reset(struct media_bay_info* bay)
 {
        MB_BIS(bay, HEATHROW_FCR, HRW_BAY_RESET_N);
 }
 
-static void __pmac keylargo_mb_un_reset(struct media_bay_info* bay)
+static void keylargo_mb_un_reset(struct media_bay_info* bay)
 {
        MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_DEV_RESET);
 }
 
-static void __pmac ohare_mb_un_reset_ide(struct media_bay_info* bay)
+static void ohare_mb_un_reset_ide(struct media_bay_info* bay)
 {
        MB_BIS(bay, OHARE_FCR, OH_IDE1_RESET_N);
 }
 
-static void __pmac heathrow_mb_un_reset_ide(struct media_bay_info* bay)
+static void heathrow_mb_un_reset_ide(struct media_bay_info* bay)
 {
        MB_BIS(bay, HEATHROW_FCR, HRW_IDE1_RESET_N);
 }
 
-static void __pmac keylargo_mb_un_reset_ide(struct media_bay_info* bay)
+static void keylargo_mb_un_reset_ide(struct media_bay_info* bay)
 {
        MB_BIS(bay, KEYLARGO_FCR1, KL1_EIDE0_RESET_N);
 }
 
-static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff)
+static inline void set_mb_power(struct media_bay_info* bay, int onoff)
 {
        /* Power up up and assert the bay reset line */
        if (onoff) {
@@ -382,7 +382,7 @@ static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff)
        bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
 }
 
-static void __pmac poll_media_bay(struct media_bay_info* bay)
+static void poll_media_bay(struct media_bay_info* bay)
 {
        int id = bay->ops->content(bay);
 
@@ -415,7 +415,7 @@ static void __pmac poll_media_bay(struct media_bay_info* bay)
        }
 }
 
-int __pmac check_media_bay(struct device_node *which_bay, int what)
+int check_media_bay(struct device_node *which_bay, int what)
 {
 #ifdef CONFIG_BLK_DEV_IDE
        int     i;
@@ -432,7 +432,7 @@ int __pmac check_media_bay(struct device_node *which_bay, int what)
 }
 EXPORT_SYMBOL(check_media_bay);
 
-int __pmac check_media_bay_by_base(unsigned long base, int what)
+int check_media_bay_by_base(unsigned long base, int what)
 {
 #ifdef CONFIG_BLK_DEV_IDE
        int     i;
@@ -449,7 +449,7 @@ int __pmac check_media_bay_by_base(unsigned long base, int what)
        return -ENODEV;
 }
 
-int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
+int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
        int irq, int index)
 {
 #ifdef CONFIG_BLK_DEV_IDE
@@ -489,7 +489,7 @@ int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long
        return -ENODEV;
 }
 
-static void __pmac media_bay_step(int i)
+static void media_bay_step(int i)
 {
        struct media_bay_info* bay = &media_bays[i];
 
@@ -619,7 +619,7 @@ static void __pmac media_bay_step(int i)
  * with the IDE driver.  It needs to be a thread because
  * ide_register can't be called from interrupt context.
  */
-static int __pmac media_bay_task(void *x)
+static int media_bay_task(void *x)
 {
        int     i;
 
@@ -704,7 +704,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
 
 }
 
-static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
+static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
 {
        struct media_bay_info   *bay = macio_get_drvdata(mdev);
 
@@ -719,7 +719,7 @@ static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
        return 0;
 }
 
-static int __pmac media_bay_resume(struct macio_dev *mdev)
+static int media_bay_resume(struct macio_dev *mdev)
 {
        struct media_bay_info   *bay = macio_get_drvdata(mdev);
 
@@ -760,7 +760,7 @@ static int __pmac media_bay_resume(struct macio_dev *mdev)
 
 /* Definitions of "ops" structures.
  */
-static struct mb_ops ohare_mb_ops __pmacdata = {
+static struct mb_ops ohare_mb_ops = {
        .name           = "Ohare",
        .content        = ohare_mb_content,
        .power          = ohare_mb_power,
@@ -769,7 +769,7 @@ static struct mb_ops ohare_mb_ops __pmacdata = {
        .un_reset_ide   = ohare_mb_un_reset_ide,
 };
 
-static struct mb_ops heathrow_mb_ops __pmacdata = {
+static struct mb_ops heathrow_mb_ops = {
        .name           = "Heathrow",
        .content        = heathrow_mb_content,
        .power          = heathrow_mb_power,
@@ -778,7 +778,7 @@ static struct mb_ops heathrow_mb_ops __pmacdata = {
        .un_reset_ide   = heathrow_mb_un_reset_ide,
 };
 
-static struct mb_ops keylargo_mb_ops __pmacdata = {
+static struct mb_ops keylargo_mb_ops = {
        .name           = "KeyLargo",
        .init           = keylargo_mb_init,
        .content        = keylargo_mb_content,
index 9b38674..34f3c7e 100644 (file)
@@ -1094,7 +1094,7 @@ static int smu_release(struct inode *inode, struct file *file)
 }
 
 
-static struct file_operations smu_device_fops __pmacdata = {
+static struct file_operations smu_device_fops = {
        .llseek         = no_llseek,
        .read           = smu_read,
        .write          = smu_write,
@@ -1103,7 +1103,7 @@ static struct file_operations smu_device_fops __pmacdata = {
        .release        = smu_release,
 };
 
-static struct miscdevice pmu_device __pmacdata = {
+static struct miscdevice pmu_device = {
        MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
 };
 
index 417deb5..d843a6c 100644 (file)
@@ -37,7 +37,6 @@ static DEFINE_SPINLOCK(cuda_lock);
 
 #ifdef CONFIG_MAC
 #define CUDA_IRQ IRQ_MAC_ADB
-#define __openfirmware
 #define eieio()
 #else
 #define CUDA_IRQ vias->intrs[0].line
index 645a2e5..91920a1 100644 (file)
@@ -244,7 +244,7 @@ int pmu_wink(struct adb_request *req);
  * - the number of response bytes which the PMU will return, or
  *   -1 if it will send a length byte.
  */
-static const s8 pmu_data_len[256][2] __openfirmwaredata = {
+static const s8 pmu_data_len[256][2] = {
 /*        0       1       2       3       4       5       6       7  */
 /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
 /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
@@ -295,7 +295,7 @@ static struct backlight_controller pmu_backlight_controller = {
 };
 #endif /* CONFIG_PMAC_BACKLIGHT */
 
-int __openfirmware
+int
 find_via_pmu(void)
 {
        if (via != 0)
@@ -374,7 +374,7 @@ find_via_pmu(void)
 }
 
 #ifdef CONFIG_ADB
-static int __openfirmware
+static int
 pmu_probe(void)
 {
        return vias == NULL? -ENODEV: 0;
@@ -405,7 +405,7 @@ static int __init via_pmu_start(void)
        bright_req_2.complete = 1;
        batt_req.complete = 1;
 
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE)
        if (pmu_kind == PMU_KEYLARGO_BASED)
                openpic_set_irq_priority(vias->intrs[0].line,
                                         OPENPIC_PRIORITY_DEFAULT + 1);
@@ -520,7 +520,7 @@ static int __init via_pmu_dev_init(void)
 
 device_initcall(via_pmu_dev_init);
 
-static int __openfirmware
+static int
 init_pmu(void)
 {
        int timeout;
@@ -588,17 +588,6 @@ pmu_get_model(void)
        return pmu_kind;
 }
 
-#ifndef CONFIG_PPC64
-static inline void wakeup_decrementer(void)
-{
-       set_dec(tb_ticks_per_jiffy);
-       /* No currently-supported powerbook has a 601,
-        * so use get_tbl, not native
-        */
-       last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
-}
-#endif
-
 static void pmu_set_server_mode(int server_mode)
 {
        struct adb_request req;
@@ -625,7 +614,7 @@ static void pmu_set_server_mode(int server_mode)
 /* This new version of the code for 2400/3400/3500 powerbooks
  * is inspired from the implementation in gkrellm-pmu
  */
-static void __pmac
+static void
 done_battery_state_ohare(struct adb_request* req)
 {
        /* format:
@@ -713,7 +702,7 @@ done_battery_state_ohare(struct adb_request* req)
        clear_bit(0, &async_req_locks);
 }
 
-static void __pmac
+static void
 done_battery_state_smart(struct adb_request* req)
 {
        /* format:
@@ -791,7 +780,7 @@ done_battery_state_smart(struct adb_request* req)
        clear_bit(0, &async_req_locks);
 }
 
-static void __pmac
+static void
 query_battery_state(void)
 {
        if (test_and_set_bit(0, &async_req_locks))
@@ -804,7 +793,7 @@ query_battery_state(void)
                        2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
 }
 
-static int __pmac
+static int
 proc_get_info(char *page, char **start, off_t off,
                int count, int *eof, void *data)
 {
@@ -819,7 +808,7 @@ proc_get_info(char *page, char **start, off_t off,
        return p - page;
 }
 
-static int __pmac
+static int
 proc_get_irqstats(char *page, char **start, off_t off,
                  int count, int *eof, void *data)
 {
@@ -846,7 +835,7 @@ proc_get_irqstats(char *page, char **start, off_t off,
        return p - page;
 }
 
-static int __pmac
+static int
 proc_get_batt(char *page, char **start, off_t off,
                int count, int *eof, void *data)
 {
@@ -870,7 +859,7 @@ proc_get_batt(char *page, char **start, off_t off,
        return p - page;
 }
 
-static int __pmac
+static int
 proc_read_options(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
 {
@@ -887,7 +876,7 @@ proc_read_options(char *page, char **start, off_t off,
        return p - page;
 }
                        
-static int __pmac
+static int
 proc_write_options(struct file *file, const char __user *buffer,
                        unsigned long count, void *data)
 {
@@ -934,7 +923,7 @@ proc_write_options(struct file *file, const char __user *buffer,
 
 #ifdef CONFIG_ADB
 /* Send an ADB command */
-static int __pmac
+static int
 pmu_send_request(struct adb_request *req, int sync)
 {
        int i, ret;
@@ -1014,7 +1003,7 @@ pmu_send_request(struct adb_request *req, int sync)
 }
 
 /* Enable/disable autopolling */
-static int __pmac
+static int
 pmu_adb_autopoll(int devs)
 {
        struct adb_request req;
@@ -1037,7 +1026,7 @@ pmu_adb_autopoll(int devs)
 }
 
 /* Reset the ADB bus */
-static int __pmac
+static int
 pmu_adb_reset_bus(void)
 {
        struct adb_request req;
@@ -1072,7 +1061,7 @@ pmu_adb_reset_bus(void)
 #endif /* CONFIG_ADB */
 
 /* Construct and send a pmu request */
-int __openfirmware
+int
 pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
            int nbytes, ...)
 {
@@ -1098,7 +1087,7 @@ pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
        return pmu_queue_request(req);
 }
 
-int __pmac
+int
 pmu_queue_request(struct adb_request *req)
 {
        unsigned long flags;
@@ -1190,7 +1179,7 @@ pmu_done(struct adb_request *req)
                (*done)(req);
 }
 
-static void __pmac
+static void
 pmu_start(void)
 {
        struct adb_request *req;
@@ -1214,7 +1203,7 @@ pmu_start(void)
        send_byte(req->data[0]);
 }
 
-void __openfirmware
+void
 pmu_poll(void)
 {
        if (!via)
@@ -1224,7 +1213,7 @@ pmu_poll(void)
        via_pmu_interrupt(0, NULL, NULL);
 }
 
-void __openfirmware
+void
 pmu_poll_adb(void)
 {
        if (!via)
@@ -1239,7 +1228,7 @@ pmu_poll_adb(void)
                || req_awaiting_reply));
 }
 
-void __openfirmware
+void
 pmu_wait_complete(struct adb_request *req)
 {
        if (!via)
@@ -1253,7 +1242,7 @@ pmu_wait_complete(struct adb_request *req)
  * This is done to avoid spurrious shutdowns when we know we'll have
  * interrupts switched off for a long time
  */
-void __openfirmware
+void
 pmu_suspend(void)
 {
        unsigned long flags;
@@ -1293,7 +1282,7 @@ pmu_suspend(void)
        } while (1);
 }
 
-void __openfirmware
+void
 pmu_resume(void)
 {
        unsigned long flags;
@@ -1323,7 +1312,7 @@ pmu_resume(void)
 }
 
 /* Interrupt data could be the result data from an ADB cmd */
-static void __pmac
+static void
 pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
 {
        unsigned char ints, pirq;
@@ -1435,7 +1424,7 @@ next:
        goto next;
 }
 
-static struct adb_request* __pmac
+static struct adb_request*
 pmu_sr_intr(struct pt_regs *regs)
 {
        struct adb_request *req;
@@ -1541,7 +1530,7 @@ pmu_sr_intr(struct pt_regs *regs)
        return NULL;
 }
 
-static irqreturn_t __pmac
+static irqreturn_t
 via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
 {
        unsigned long flags;
@@ -1629,7 +1618,7 @@ no_free_slot:
        return IRQ_RETVAL(handled);
 }
 
-void __pmac
+void
 pmu_unlock(void)
 {
        unsigned long flags;
@@ -1642,7 +1631,7 @@ pmu_unlock(void)
 }
 
 
-static irqreturn_t __pmac
+static irqreturn_t
 gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
 {
        unsigned long flags;
@@ -1663,12 +1652,12 @@ gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_PMAC_BACKLIGHT
-static int backlight_to_bright[] __pmacdata = {
+static int backlight_to_bright[] = {
        0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e,
        0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e
 };
  
-static int __openfirmware
+static int
 pmu_set_backlight_enable(int on, int level, void* data)
 {
        struct adb_request req;
@@ -1688,7 +1677,7 @@ pmu_set_backlight_enable(int on, int level, void* data)
        return 0;
 }
 
-static void __openfirmware
+static void
 pmu_bright_complete(struct adb_request *req)
 {
        if (req == &bright_req_1)
@@ -1697,7 +1686,7 @@ pmu_bright_complete(struct adb_request *req)
                clear_bit(2, &async_req_locks);
 }
 
-static int __openfirmware
+static int
 pmu_set_backlight_level(int level, void* data)
 {
        if (vias == NULL)
@@ -1717,7 +1706,7 @@ pmu_set_backlight_level(int level, void* data)
 }
 #endif /* CONFIG_PMAC_BACKLIGHT */
 
-void __pmac
+void
 pmu_enable_irled(int on)
 {
        struct adb_request req;
@@ -1732,7 +1721,7 @@ pmu_enable_irled(int on)
        pmu_wait_complete(&req);
 }
 
-void __pmac
+void
 pmu_restart(void)
 {
        struct adb_request req;
@@ -1757,7 +1746,7 @@ pmu_restart(void)
                ;
 }
 
-void __pmac
+void
 pmu_shutdown(void)
 {
        struct adb_request req;
@@ -2076,7 +2065,7 @@ pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n)
 }
 
 /* Sleep is broadcast last-to-first */
-static int __pmac
+static int
 broadcast_sleep(int when, int fallback)
 {
        int ret = PBOOK_SLEEP_OK;
@@ -2101,7 +2090,7 @@ broadcast_sleep(int when, int fallback)
 }
 
 /* Wake is broadcast first-to-last */
-static int __pmac
+static int
 broadcast_wake(void)
 {
        int ret = PBOOK_SLEEP_OK;
@@ -2132,7 +2121,7 @@ static struct pci_save {
 } *pbook_pci_saves;
 static int pbook_npci_saves;
 
-static void __pmac
+static void
 pbook_alloc_pci_save(void)
 {
        int npci;
@@ -2149,7 +2138,7 @@ pbook_alloc_pci_save(void)
        pbook_npci_saves = npci;
 }
 
-static void __pmac
+static void
 pbook_free_pci_save(void)
 {
        if (pbook_pci_saves == NULL)
@@ -2159,7 +2148,7 @@ pbook_free_pci_save(void)
        pbook_npci_saves = 0;
 }
 
-static void __pmac
+static void
 pbook_pci_save(void)
 {
        struct pci_save *ps = pbook_pci_saves;
@@ -2190,7 +2179,7 @@ pbook_pci_save(void)
  * during boot, it will be in the pci dev list. If it's disabled at this point
  * (and it will probably be), then you can't access it's config space.
  */
-static void __pmac
+static void
 pbook_pci_restore(void)
 {
        u16 cmd;
@@ -2238,7 +2227,7 @@ pbook_pci_restore(void)
 
 #ifdef DEBUG_SLEEP
 /* N.B. This doesn't work on the 3400 */
-void  __pmac
+void 
 pmu_blink(int n)
 {
        struct adb_request req;
@@ -2277,9 +2266,9 @@ pmu_blink(int n)
  * Put the powerbook to sleep.
  */
  
-static u32 save_via[8] __pmacdata;
+static u32 save_via[8];
 
-static void __pmac
+static void
 save_via_state(void)
 {
        save_via[0] = in_8(&via[ANH]);
@@ -2291,7 +2280,7 @@ save_via_state(void)
        save_via[6] = in_8(&via[T1CL]);
        save_via[7] = in_8(&via[T1CH]);
 }
-static void __pmac
+static void
 restore_via_state(void)
 {
        out_8(&via[ANH], save_via[0]);
@@ -2307,7 +2296,7 @@ restore_via_state(void)
        out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
 }
 
-static int __pmac
+static int
 pmac_suspend_devices(void)
 {
        int ret;
@@ -2397,7 +2386,7 @@ pmac_suspend_devices(void)
        return 0;
 }
 
-static int __pmac
+static int
 pmac_wakeup_devices(void)
 {
        mdelay(100);
@@ -2436,7 +2425,7 @@ pmac_wakeup_devices(void)
 #define        GRACKLE_NAP     (1<<4)
 #define        GRACKLE_SLEEP   (1<<3)
 
-int __pmac
+int
 powerbook_sleep_grackle(void)
 {
        unsigned long save_l2cr;
@@ -2520,7 +2509,7 @@ powerbook_sleep_grackle(void)
        return 0;
 }
 
-static int __pmac
+static int
 powerbook_sleep_Core99(void)
 {
        unsigned long save_l2cr;
@@ -2620,7 +2609,7 @@ powerbook_sleep_Core99(void)
 #define PB3400_MEM_CTRL                0xf8000000
 #define PB3400_MEM_CTRL_SLEEP  0x70
 
-static int __pmac
+static int
 powerbook_sleep_3400(void)
 {
        int ret, i, x;
@@ -2720,9 +2709,9 @@ struct pmu_private {
 };
 
 static LIST_HEAD(all_pmu_pvt);
-static DEFINE_SPINLOCK(all_pvt_lock __pmacdata);
+static DEFINE_SPINLOCK(all_pvt_lock);
 
-static void __pmac
+static void
 pmu_pass_intr(unsigned char *data, int len)
 {
        struct pmu_private *pp;
@@ -2751,7 +2740,7 @@ pmu_pass_intr(unsigned char *data, int len)
        spin_unlock_irqrestore(&all_pvt_lock, flags);
 }
 
-static int __pmac
+static int
 pmu_open(struct inode *inode, struct file *file)
 {
        struct pmu_private *pp;
@@ -2773,7 +2762,7 @@ pmu_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static ssize_t  __pmac
+static ssize_t 
 pmu_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
 {
@@ -2825,14 +2814,14 @@ pmu_read(struct file *file, char __user *buf,
        return ret;
 }
 
-static ssize_t __pmac
+static ssize_t
 pmu_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
 {
        return 0;
 }
 
-static unsigned int __pmac
+static unsigned int
 pmu_fpoll(struct file *filp, poll_table *wait)
 {
        struct pmu_private *pp = filp->private_data;
@@ -2849,7 +2838,7 @@ pmu_fpoll(struct file *filp, poll_table *wait)
        return mask;
 }
 
-static int __pmac
+static int
 pmu_release(struct inode *inode, struct file *file)
 {
        struct pmu_private *pp = file->private_data;
@@ -2874,8 +2863,7 @@ pmu_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-/* Note: removed __openfirmware here since it causes link errors */
-static int __pmac
+static int
 pmu_ioctl(struct inode * inode, struct file *filp,
                     u_int cmd, u_long arg)
 {
@@ -2957,7 +2945,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
        return error;
 }
 
-static struct file_operations pmu_device_fops __pmacdata = {
+static struct file_operations pmu_device_fops = {
        .read           = pmu_read,
        .write          = pmu_write,
        .poll           = pmu_fpoll,
@@ -2966,7 +2954,7 @@ static struct file_operations pmu_device_fops __pmacdata = {
        .release        = pmu_release,
 };
 
-static struct miscdevice pmu_device __pmacdata = {
+static struct miscdevice pmu_device = {
        PMU_MINOR, "pmu", &pmu_device_fops
 };
 
@@ -2982,7 +2970,7 @@ device_initcall(pmu_device_init);
 
 
 #ifdef DEBUG_SLEEP
-static inline void  __pmac
+static inline void
 polled_handshake(volatile unsigned char __iomem *via)
 {
        via[B] &= ~TREQ; eieio();
@@ -2993,7 +2981,7 @@ polled_handshake(volatile unsigned char __iomem *via)
                ;
 }
 
-static inline void  __pmac
+static inline void
 polled_send_byte(volatile unsigned char __iomem *via, int x)
 {
        via[ACR] |= SR_OUT | SR_EXT; eieio();
@@ -3001,7 +2989,7 @@ polled_send_byte(volatile unsigned char __iomem *via, int x)
        polled_handshake(via);
 }
 
-static inline int __pmac
+static inline int
 polled_recv_byte(volatile unsigned char __iomem *via)
 {
        int x;
@@ -3013,7 +3001,7 @@ polled_recv_byte(volatile unsigned char __iomem *via)
        return x;
 }
 
-int __pmac
+int
 pmu_polled_request(struct adb_request *req)
 {
        unsigned long flags;
index 820dc52..6f80d76 100644 (file)
@@ -835,7 +835,7 @@ static struct pci_save {
 } *pbook_pci_saves;
 static int n_pbook_pci_saves;
 
-static inline void __openfirmware
+static inline void
 pbook_pci_save(void)
 {
        int npci;
@@ -863,7 +863,7 @@ pbook_pci_save(void)
        }
 }
 
-static inline void __openfirmware
+static inline void
 pbook_pci_restore(void)
 {
        u16 cmd;
@@ -902,7 +902,7 @@ pbook_pci_restore(void)
 #define IRQ_ENABLE     ((unsigned int *)0xf3000024)
 #define MEM_CTRL       ((unsigned int *)0xf8000070)
 
-int __openfirmware powerbook_sleep(void)
+int powerbook_sleep(void)
 {
        int ret, i, x;
        static int save_backlight;
@@ -1001,25 +1001,24 @@ int __openfirmware powerbook_sleep(void)
 /*
  * Support for /dev/pmu device
  */
-static int __openfirmware pmu_open(struct inode *inode, struct file *file)
+static int pmu_open(struct inode *inode, struct file *file)
 {
        return 0;
 }
 
-static ssize_t __openfirmware pmu_read(struct file *file, char *buf,
+static ssize_t pmu_read(struct file *file, char *buf,
                        size_t count, loff_t *ppos)
 {
        return 0;
 }
 
-static ssize_t __openfirmware pmu_write(struct file *file, const char *buf,
+static ssize_t pmu_write(struct file *file, const char *buf,
                         size_t count, loff_t *ppos)
 {
        return 0;
 }
 
-/* Note: removed __openfirmware here since it causes link errors */
-static int /*__openfirmware*/ pmu_ioctl(struct inode * inode, struct file *filp,
+static int pmu_ioctl(struct inode * inode, struct file *filp,
                     u_int cmd, u_long arg)
 {
        int error;
index 3d56cf5..db3bc2f 100644 (file)
@@ -70,8 +70,9 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/ethtool.h>
+
+#include <asm/abs_addr.h>
 #include <asm/iSeries/mf.h>
-#include <asm/iSeries/iSeries_pci.h>
 #include <asm/uaccess.h>
 
 #include <asm/iSeries/HvLpConfig.h>
@@ -1397,13 +1398,13 @@ static inline void veth_build_dma_list(struct dma_chunk *list,
         * it just at the granularity of iSeries real->absolute
         * mapping?  Indeed, given the way the allocator works, can we
         * count on them being absolutely contiguous? */
-       list[0].addr = ISERIES_HV_ADDR(p);
+       list[0].addr = iseries_hv_addr(p);
        list[0].size = min(length,
                           PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
 
        done = list[0].size;
        while (done < length) {
-               list[i].addr = ISERIES_HV_ADDR(p + done);
+               list[i].addr = iseries_hv_addr(p + done);
                list[i].size = min(length-done, PAGE_SIZE);
                done += list[i].size;
                i++;
@@ -1496,8 +1497,8 @@ static void veth_receive(struct veth_lpar_connection *cnx,
                                            cnx->dst_inst,
                                            HvLpDma_AddressType_RealAddress,
                                            HvLpDma_AddressType_TceIndex,
-                                           ISERIES_HV_ADDR(&local_list),
-                                           ISERIES_HV_ADDR(&remote_list),
+                                           iseries_hv_addr(&local_list),
+                                           iseries_hv_addr(&remote_list),
                                            length);
                if (rc != HvLpDma_Rc_Good) {
                        dev_kfree_skb_irq(skb);
index a345355..5b6b0b6 100644 (file)
@@ -629,12 +629,4 @@ void __init proc_misc_init(void)
        if (entry)
                entry->proc_fops = &proc_sysrq_trigger_operations;
 #endif
-#ifdef CONFIG_PPC32
-       {
-               extern struct file_operations ppc_htab_operations;
-               entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL);
-               if (entry)
-                       entry->proc_fops = &ppc_htab_operations;
-       }
-#endif
 }
similarity index 69%
rename from include/asm-ppc64/a.out.h
rename to include/asm-powerpc/a.out.h
index 3871e25..c7393a9 100644 (file)
@@ -1,14 +1,5 @@
-#ifndef __PPC64_A_OUT_H__
-#define __PPC64_A_OUT_H__
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
+#ifndef _ASM_POWERPC_A_OUT_H
+#define _ASM_POWERPC_A_OUT_H
 
 struct exec
 {
@@ -27,6 +18,7 @@ struct exec
 #define N_SYMSIZE(a)   ((a).a_syms)
 
 #ifdef __KERNEL__
+#ifdef __powerpc64__
 
 #define STACK_TOP_USER64 TASK_SIZE_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
@@ -34,6 +26,11 @@ struct exec
 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
                   STACK_TOP_USER32 : STACK_TOP_USER64)
 
+#else /* __powerpc64__ */
+
+#define STACK_TOP TASK_SIZE
+
+#endif /* __powerpc64__ */
 #endif /* __KERNEL__ */
 
-#endif /* __PPC64_A_OUT_H__ */
+#endif /* _ASM_POWERPC_A_OUT_H */
similarity index 85%
rename from include/asm-ppc/atomic.h
rename to include/asm-powerpc/atomic.h
index eeafd50..ed4b345 100644 (file)
@@ -1,29 +1,20 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
 /*
  * PowerPC atomic operations
  */
 
-#ifndef _ASM_PPC_ATOMIC_H_
-#define _ASM_PPC_ATOMIC_H_
-
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <asm/synch.h>
 
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i)         { (i) }
 
 #define atomic_read(v)         ((v)->counter)
 #define atomic_set(v,i)                (((v)->counter) = (i))
 
-extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
-
-#ifdef CONFIG_SMP
-#define SMP_SYNC       "sync"
-#define SMP_ISYNC      "\n\tisync"
-#else
-#define SMP_SYNC       ""
-#define SMP_ISYNC
-#endif
-
 /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
  * The old ATOMIC_SYNC_FIX covered some but not all of this.
  */
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
+       EIEIO_ON_SMP
 "1:    lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %0,0,%2 \n\
        bne-    1b"
-       SMP_ISYNC
+       ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
+       EIEIO_ON_SMP
 "1:    lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %0,0,%2 \n\
        bne-    1b"
-       SMP_ISYNC
+       ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
+       EIEIO_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n"
        PPC405_ERR77(0,%1)
 "      stwcx.  %0,0,%1 \n\
        bne-    1b"
-       SMP_ISYNC
+       ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
+       EIEIO_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%1)
 "      stwcx.  %0,0,%1\n\
        bne-    1b"
-       SMP_ISYNC
+       ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
+       EIEIO_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n"
        PPC405_ERR77(0,%1)
 "      stwcx.  %0,0,%1\n\
        bne-    1b"
-       SMP_ISYNC
+       ISYNC_ON_SMP
        "\n\
 2:"    : "=&r" (t)
        : "r" (&v->counter)
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
        return t;
 }
 
-#define __MB   __asm__ __volatile__ (SMP_SYNC : : : "memory")
-#define smp_mb__before_atomic_dec()    __MB
-#define smp_mb__after_atomic_dec()     __MB
-#define smp_mb__before_atomic_inc()    __MB
-#define smp_mb__after_atomic_inc()     __MB
+#define smp_mb__before_atomic_dec()     smp_mb()
+#define smp_mb__after_atomic_dec()      smp_mb()
+#define smp_mb__before_atomic_inc()     smp_mb()
+#define smp_mb__after_atomic_inc()      smp_mb()
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_PPC_ATOMIC_H_ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
similarity index 82%
rename from include/asm-ppc64/auxvec.h
rename to include/asm-powerpc/auxvec.h
index ac6381a..79d8c47 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __PPC64_AUXVEC_H
-#define __PPC64_AUXVEC_H
+#ifndef _ASM_POWERPC_AUXVEC_H
+#define _ASM_POWERPC_AUXVEC_H
 
 /*
  * We need to put in some extra aux table entries to tell glibc what
@@ -14,6 +14,8 @@
 /* The vDSO location. We have to use the same value as x86 for glibc's
  * sake :-)
  */
+#ifdef __powerpc64__
 #define AT_SYSINFO_EHDR                33
+#endif
 
-#endif /* __PPC64_AUXVEC_H */
+#endif
similarity index 82%
rename from include/asm-ppc/backlight.h
rename to include/asm-powerpc/backlight.h
index 3a1c3de..1ba1f27 100644 (file)
@@ -1,12 +1,13 @@
 /*
  * Routines for handling backlight control on PowerBooks
  *
- * For now, implementation resides in arch/ppc/kernel/pmac_support.c
+ * For now, implementation resides in
+ * arch/powerpc/platforms/powermac/pmac_support.c
  *
  */
+#ifndef __ASM_POWERPC_BACKLIGHT_H
+#define __ASM_POWERPC_BACKLIGHT_H
 #ifdef __KERNEL__
-#ifndef __ASM_PPC_BACKLIGHT_H
-#define __ASM_PPC_BACKLIGHT_H
 
 /* Abstract values */
 #define BACKLIGHT_OFF  0
@@ -26,5 +27,5 @@ extern int get_backlight_enable(void);
 extern int set_backlight_level(int level);
 extern int get_backlight_level(void);
 
-#endif
 #endif /* __KERNEL__ */
+#endif
similarity index 62%
rename from include/asm-ppc64/bug.h
rename to include/asm-powerpc/bug.h
index 1601782..e4d028e 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC64_BUG_H
-#define _PPC64_BUG_H
+#ifndef _ASM_POWERPC_BUG_H
+#define _ASM_POWERPC_BUG_H
 
 /*
  * Define an illegal instr to trap on the bug.
 
 #ifndef __ASSEMBLY__
 
+#ifdef __powerpc64__
+#define BUG_TABLE_ENTRY(label, line, file, func) \
+       ".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
+#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
+#define DATA_TYPE long long
+#else
+#define BUG_TABLE_ENTRY(label, line, file, func) \
+       ".long " #label ", " #line ", " #file ", " #func "\n"
+#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
+#define DATA_TYPE int
+#endif /* __powerpc64__ */
+
 struct bug_entry {
        unsigned long   bug_addr;
-       long            line;
+       int             line;
        const char      *file;
        const char      *function;
 };
@@ -32,28 +44,28 @@ struct bug_entry *find_bug(unsigned long bugaddr);
        __asm__ __volatile__(                                            \
                "1:     twi 31,0,0\n"                                    \
                ".section __bug_table,\"a\"\n\t"                         \
-               "       .llong 1b,%0,%1,%2\n"                            \
+               BUG_TABLE_ENTRY(1b,%0,%1,%2)                             \
                ".previous"                                              \
                : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do {                                         \
        __asm__ __volatile__(                                   \
-               "1:     tdnei %0,0\n"                           \
+               TRAP_OP(%0,0)                                   \
                ".section __bug_table,\"a\"\n\t"                \
-               "       .llong 1b,%1,%2,%3\n"                   \
+               BUG_TABLE_ENTRY(1b,%1,%2,%3)                    \
                ".previous"                                     \
-               : : "r" ((long long)(x)), "i" (__LINE__),       \
+               : : "r" ((DATA_TYPE)(x)), "i" (__LINE__),       \
                    "i" (__FILE__), "i" (__FUNCTION__));        \
 } while (0)
 
 #define WARN_ON(x) do {                                                \
        __asm__ __volatile__(                                   \
-               "1:     tdnei %0,0\n"                           \
+               TRAP_OP(%0,0)                                   \
                ".section __bug_table,\"a\"\n\t"                \
-               "       .llong 1b,%1,%2,%3\n"                   \
+               BUG_TABLE_ENTRY(1b,%1,%2,%3)                    \
                ".previous"                                     \
-               : : "r" ((long long)(x)),                       \
+               : : "r" ((DATA_TYPE)(x)),                       \
                    "i" (__LINE__ + BUG_WARNING_TRAP),          \
                    "i" (__FILE__), "i" (__FUNCTION__));        \
 } while (0)
@@ -61,9 +73,9 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 #define HAVE_ARCH_BUG
 #define HAVE_ARCH_BUG_ON
 #define HAVE_ARCH_WARN_ON
-#endif
-#endif
+#endif /* CONFIG_BUG */
+#endif /* __ASSEMBLY__ */
 
 #include <asm-generic/bug.h>
 
-#endif
+#endif /* _ASM_POWERPC_BUG_H */
similarity index 90%
rename from include/asm-ppc64/byteorder.h
rename to include/asm-powerpc/byteorder.h
index 8b57da6..b377522 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC64_BYTEORDER_H
-#define _PPC64_BYTEORDER_H
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -77,10 +77,13 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
 
 #ifndef __STRICT_ANSI__
 #define __BYTEORDER_HAS_U64__
-#endif
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
 #include <linux/byteorder/big_endian.h>
 
-#endif /* _PPC64_BYTEORDER_H */
+#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
new file mode 100644 (file)
index 0000000..c019501
--- /dev/null
@@ -0,0 +1,427 @@
+#ifndef __ASM_POWERPC_CPUTABLE_H
+#define __ASM_POWERPC_CPUTABLE_H
+
+#include <linux/config.h>
+#include <asm/ppc_asm.h> /* for ASM_CONST */
+
+#define PPC_FEATURE_32                 0x80000000
+#define PPC_FEATURE_64                 0x40000000
+#define PPC_FEATURE_601_INSTR          0x20000000
+#define PPC_FEATURE_HAS_ALTIVEC                0x10000000
+#define PPC_FEATURE_HAS_FPU            0x08000000
+#define PPC_FEATURE_HAS_MMU            0x04000000
+#define PPC_FEATURE_HAS_4xxMAC         0x02000000
+#define PPC_FEATURE_UNIFIED_CACHE      0x01000000
+#define PPC_FEATURE_HAS_SPE            0x00800000
+#define PPC_FEATURE_HAS_EFP_SINGLE     0x00400000
+#define PPC_FEATURE_HAS_EFP_DOUBLE     0x00200000
+#define PPC_FEATURE_NO_TB              0x00100000
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+/* This structure can grow, it's real size is used by head.S code
+ * via the mkdefs mechanism.
+ */
+struct cpu_spec;
+struct op_powerpc_model;
+
+typedef        void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+
+struct cpu_spec {
+       /* CPU is matched via (PVR & pvr_mask) == pvr_value */
+       unsigned int    pvr_mask;
+       unsigned int    pvr_value;
+
+       char            *cpu_name;
+       unsigned long   cpu_features;           /* Kernel features */
+       unsigned int    cpu_user_features;      /* Userland features */
+
+       /* cache line sizes */
+       unsigned int    icache_bsize;
+       unsigned int    dcache_bsize;
+
+       /* number of performance monitor counters */
+       unsigned int    num_pmcs;
+
+       /* this is called to initialize various CPU bits like L1 cache,
+        * BHT, SPD, etc... from head.S before branching to identify_machine
+        */
+       cpu_setup_t     cpu_setup;
+
+       /* Used by oprofile userspace to select the right counters */
+       char            *oprofile_cpu_type;
+
+       /* Processor specific oprofile operations */
+       struct op_powerpc_model *oprofile_model;
+};
+
+extern struct cpu_spec         *cur_cpu_spec;
+
+extern void identify_cpu(unsigned long offset, unsigned long cpu);
+extern void do_cpu_ftr_fixups(unsigned long offset);
+
+#endif /* __ASSEMBLY__ */
+
+/* CPU kernel features */
+
+/* Retain the 32b definitions all use bottom half of word */
+#define CPU_FTR_SPLIT_ID_CACHE         ASM_CONST(0x0000000000000001)
+#define CPU_FTR_L2CR                   ASM_CONST(0x0000000000000002)
+#define CPU_FTR_SPEC7450               ASM_CONST(0x0000000000000004)
+#define CPU_FTR_ALTIVEC                        ASM_CONST(0x0000000000000008)
+#define CPU_FTR_TAU                    ASM_CONST(0x0000000000000010)
+#define CPU_FTR_CAN_DOZE               ASM_CONST(0x0000000000000020)
+#define CPU_FTR_USE_TB                 ASM_CONST(0x0000000000000040)
+#define CPU_FTR_604_PERF_MON           ASM_CONST(0x0000000000000080)
+#define CPU_FTR_601                    ASM_CONST(0x0000000000000100)
+#define CPU_FTR_HPTE_TABLE             ASM_CONST(0x0000000000000200)
+#define CPU_FTR_CAN_NAP                        ASM_CONST(0x0000000000000400)
+#define CPU_FTR_L3CR                   ASM_CONST(0x0000000000000800)
+#define CPU_FTR_L3_DISABLE_NAP         ASM_CONST(0x0000000000001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR      ASM_CONST(0x0000000000002000)
+#define CPU_FTR_DUAL_PLL_750FX         ASM_CONST(0x0000000000004000)
+#define CPU_FTR_NO_DPM                 ASM_CONST(0x0000000000008000)
+#define CPU_FTR_HAS_HIGH_BATS          ASM_CONST(0x0000000000010000)
+#define CPU_FTR_NEED_COHERENT          ASM_CONST(0x0000000000020000)
+#define CPU_FTR_NO_BTIC                        ASM_CONST(0x0000000000040000)
+#define CPU_FTR_BIG_PHYS               ASM_CONST(0x0000000000080000)
+
+#ifdef __powerpc64__
+/* Add the 64b processor unique features in the top half of the word */
+#define CPU_FTR_SLB                    ASM_CONST(0x0000000100000000)
+#define CPU_FTR_16M_PAGE               ASM_CONST(0x0000000200000000)
+#define CPU_FTR_TLBIEL                         ASM_CONST(0x0000000400000000)
+#define CPU_FTR_NOEXECUTE              ASM_CONST(0x0000000800000000)
+#define CPU_FTR_NODSISRALIGN           ASM_CONST(0x0000001000000000)
+#define CPU_FTR_IABR                   ASM_CONST(0x0000002000000000)
+#define CPU_FTR_MMCRA                          ASM_CONST(0x0000004000000000)
+#define CPU_FTR_CTRL                   ASM_CONST(0x0000008000000000)
+#define CPU_FTR_SMT                    ASM_CONST(0x0000010000000000)
+#define CPU_FTR_COHERENT_ICACHE        ASM_CONST(0x0000020000000000)
+#define CPU_FTR_LOCKLESS_TLBIE         ASM_CONST(0x0000040000000000)
+#define CPU_FTR_MMCRA_SIHV             ASM_CONST(0x0000080000000000)
+#else
+/* ensure on 32b processors the flags are available for compiling but
+ * don't do anything */
+#define CPU_FTR_SLB                    ASM_CONST(0x0)
+#define CPU_FTR_16M_PAGE               ASM_CONST(0x0)
+#define CPU_FTR_TLBIEL                         ASM_CONST(0x0)
+#define CPU_FTR_NOEXECUTE              ASM_CONST(0x0)
+#define CPU_FTR_NODSISRALIGN           ASM_CONST(0x0)
+#define CPU_FTR_IABR                   ASM_CONST(0x0)
+#define CPU_FTR_MMCRA                          ASM_CONST(0x0)
+#define CPU_FTR_CTRL                   ASM_CONST(0x0)
+#define CPU_FTR_SMT                    ASM_CONST(0x0)
+#define CPU_FTR_COHERENT_ICACHE        ASM_CONST(0x0)
+#define CPU_FTR_LOCKLESS_TLBIE         ASM_CONST(0x0)
+#define CPU_FTR_MMCRA_SIHV             ASM_CONST(0x0)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
+                                       CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
+                                       CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
+
+/* iSeries doesn't support large pages */
+#ifdef CONFIG_PPC_ISERIES
+#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_PPCAS_ARCH_V2_BASE)
+#else
+#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
+#endif /* CONFIG_PPC_ISERIES */
+
+/* We only set the altivec features if the kernel was compiled with altivec
+ * support
+ */
+#ifdef CONFIG_ALTIVEC
+#define CPU_FTR_ALTIVEC_COMP   CPU_FTR_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
+#else
+#define CPU_FTR_ALTIVEC_COMP   0
+#define PPC_FEATURE_HAS_ALTIVEC_COMP    0
+#endif
+
+/* We need to mark all pages as being coherent if we're SMP or we
+ * have a 74[45]x and an MPC107 host bridge.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
+#define CPU_FTR_COMMON                  CPU_FTR_NEED_COHERENT
+#else
+#define CPU_FTR_COMMON                  0
+#endif
+
+/* The powersave features NAP & DOZE seems to confuse BDI when
+   debugging. So if a BDI is used, disable theses
+ */
+#ifndef CONFIG_BDI_SWITCH
+#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
+#define CPU_FTR_MAYBE_CAN_NAP  CPU_FTR_CAN_NAP
+#else
+#define CPU_FTR_MAYBE_CAN_DOZE 0
+#define CPU_FTR_MAYBE_CAN_NAP  0
+#endif
+
+#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
+                    !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
+                    !defined(CONFIG_BOOKE))
+
+enum {
+       CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE,
+       CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+       CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+           CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
+       CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+           CPU_FTR_NO_DPM,
+       CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+           CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+       CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+           CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+           CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+       CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
+           CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+           CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
+           CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
+           CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
+           CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
+           CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
+       CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+           CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+           CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
+       CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+           CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB |
+           CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+           CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+           CPU_FTR_NEED_COHERENT,
+       CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB,
+       CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+           CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+       CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+           CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+       CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+       CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+       CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+       CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+           CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
+           CPU_FTR_MAYBE_CAN_NAP,
+       CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+       CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+       CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+       CPU_FTRS_E200 = CPU_FTR_USE_TB,
+       CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+       CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_BIG_PHYS,
+       CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
+#ifdef __powerpc64__
+       CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
+       CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+           CPU_FTR_MMCRA | CPU_FTR_CTRL,
+       CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
+       CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+           CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
+       CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+           CPU_FTR_MMCRA | CPU_FTR_SMT |
+           CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+           CPU_FTR_MMCRA_SIHV,
+       CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+           CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
+       CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+           CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
+#endif
+
+       CPU_FTRS_POSSIBLE =
+#if CLASSIC_PPC
+           CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+           CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
+           CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
+           CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
+           CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
+           CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
+           CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
+           CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
+#else
+           CPU_FTRS_GENERIC_32 |
+#endif
+#ifdef CONFIG_PPC64BRIDGE
+           CPU_FTRS_POWER3_32 |
+#endif
+#ifdef CONFIG_POWER4
+           CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
+#endif
+#ifdef CONFIG_8xx
+           CPU_FTRS_8XX |
+#endif
+#ifdef CONFIG_40x
+           CPU_FTRS_40X |
+#endif
+#ifdef CONFIG_44x
+           CPU_FTRS_44X |
+#endif
+#ifdef CONFIG_E200
+           CPU_FTRS_E200 |
+#endif
+#ifdef CONFIG_E500
+           CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef __powerpc64__
+           CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
+           CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
+#endif
+           0,
+
+       CPU_FTRS_ALWAYS =
+#if CLASSIC_PPC
+           CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+           CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
+           CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
+           CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
+           CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
+           CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
+           CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
+           CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
+#else
+           CPU_FTRS_GENERIC_32 &
+#endif
+#ifdef CONFIG_PPC64BRIDGE
+           CPU_FTRS_POWER3_32 &
+#endif
+#ifdef CONFIG_POWER4
+           CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
+#endif
+#ifdef CONFIG_8xx
+           CPU_FTRS_8XX &
+#endif
+#ifdef CONFIG_40x
+           CPU_FTRS_40X &
+#endif
+#ifdef CONFIG_44x
+           CPU_FTRS_44X &
+#endif
+#ifdef CONFIG_E200
+           CPU_FTRS_E200 &
+#endif
+#ifdef CONFIG_E500
+           CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef __powerpc64__
+           CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
+           CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
+#endif
+           CPU_FTRS_POSSIBLE,
+};
+
+static inline int cpu_has_feature(unsigned long feature)
+{
+       return (CPU_FTRS_ALWAYS & feature) ||
+              (CPU_FTRS_POSSIBLE
+               & cur_cpu_spec->cpu_features
+               & feature);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+
+#define BEGIN_FTR_SECTION              98:
+
+#ifndef __powerpc64__
+#define END_FTR_SECTION(msk, val)              \
+99:                                            \
+       .section __ftr_fixup,"a";               \
+       .align 2;                               \
+       .long msk;                              \
+       .long val;                              \
+       .long 98b;                              \
+       .long 99b;                              \
+       .previous
+#else /* __powerpc64__ */
+#define END_FTR_SECTION(msk, val)              \
+99:                                            \
+       .section __ftr_fixup,"a";               \
+       .align 3;                               \
+       .llong msk;                             \
+       .llong val;                             \
+       .llong 98b;                             \
+       .llong 99b;                             \
+       .previous
+#endif /* __powerpc64__ */
+
+#define END_FTR_SECTION_IFSET(msk)     END_FTR_SECTION((msk), (msk))
+#define END_FTR_SECTION_IFCLR(msk)     END_FTR_SECTION((msk), 0)
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_CPUTABLE_H */
similarity index 86%
rename from include/asm-ppc/dma.h
rename to include/asm-powerpc/dma.h
index cc8e5cd..926378d 100644 (file)
@@ -1,18 +1,14 @@
+#ifndef _ASM_POWERPC_DMA_H
+#define _ASM_POWERPC_DMA_H
+
 /*
- * include/asm-ppc/dma.h: Defines for using and allocating dma channels.
+ * Defines for using and allocating dma channels.
  * Written by Hennus Bergman, 1992.
  * High DMA channel support & info by Hannu Savolainen
  * and John Boyd, Nov. 1992.
  * Changes for ppc sound by Christoph Nadig
  */
 
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <asm/io.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
 /*
  * Note: Adapted for PowerPC by Gary Thomas
  * Modified by Cort Dougan <cort@cs.nmt.edu>
  * with a grain of salt.
  */
 
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
+#include <linux/config.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
 
 #ifndef MAX_DMA_CHANNELS
 #define MAX_DMA_CHANNELS       8
 #endif
 
 /* The maximum address that we can perform a DMA transfer to on this platform */
 /* Doesn't really apply... */
-#define MAX_DMA_ADDRESS                0xFFFFFFFF
+#define MAX_DMA_ADDRESS                (~0UL)
 
-/* in arch/ppc/kernel/setup.c -- Cort */
-extern unsigned long DMA_MODE_WRITE, DMA_MODE_READ;
-extern unsigned long ISA_DMA_THRESHOLD;
+#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
 
 #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
 #define dma_outb       outb_p
@@ -171,7 +167,18 @@ extern long ppc_cs4232_dma, ppc_cs4232_dma2;
 #define DMA1_EXT_REG           0x40B
 #define DMA2_EXT_REG           0x4D6
 
+#ifndef __powerpc64__
+    /* in arch/ppc/kernel/setup.c -- Cort */
+    extern unsigned int DMA_MODE_WRITE;
+    extern unsigned int DMA_MODE_READ;
+    extern unsigned long ISA_DMA_THRESHOLD;
+#else
+    #define DMA_MODE_READ      0x44    /* I/O to memory, no autoinit, increment, single mode */
+    #define DMA_MODE_WRITE     0x48    /* memory to I/O, no autoinit, increment, single mode */
+#endif
+
 #define DMA_MODE_CASCADE       0xC0    /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
 #define DMA_AUTOINIT           0x10
 
 extern spinlock_t dma_spin_lock;
@@ -200,8 +207,9 @@ static __inline__ void enable_dma(unsigned int dmanr)
        if (dmanr <= 3) {
                dma_outb(dmanr, DMA1_MASK_REG);
                dma_outb(ucDmaCmd, DMA1_CMD_REG);       /* Enable group */
-       } else
+       } else {
                dma_outb(dmanr & 3, DMA2_MASK_REG);
+       }
 }
 
 static __inline__ void disable_dma(unsigned int dmanr)
@@ -290,19 +298,26 @@ static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
 static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
 {
        if (dmanr <= 3) {
-               dma_outb(phys & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
-               dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
+               dma_outb(phys & 0xff,
+                        ((dmanr & 3) << 1) + IO_DMA1_BASE);
+               dma_outb((phys >> 8) & 0xff,
+                        ((dmanr & 3) << 1) + IO_DMA1_BASE);
        } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
-               dma_outb(phys & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
-               dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+               dma_outb(phys & 0xff,
+                        ((dmanr & 3) << 2) + IO_DMA2_BASE);
+               dma_outb((phys >> 8) & 0xff,
+                        ((dmanr & 3) << 2) + IO_DMA2_BASE);
                dma_outb((dmanr & 3), DMA2_EXT_REG);
        } else {
-               dma_outb((phys >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
-               dma_outb((phys >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+               dma_outb((phys >> 1) & 0xff,
+                        ((dmanr & 3) << 2) + IO_DMA2_BASE);
+               dma_outb((phys >> 9) & 0xff,
+                        ((dmanr & 3) << 2) + IO_DMA2_BASE);
        }
        set_dma_page(dmanr, phys >> 16);
 }
 
+
 /* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
  * a specific DMA channel.
  * You must ensure the parameters are valid.
@@ -315,21 +330,24 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
 {
        count--;
        if (dmanr <= 3) {
-               dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
-               dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 +
-                        IO_DMA1_BASE);
+               dma_outb(count & 0xff,
+                        ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+               dma_outb((count >> 8) & 0xff,
+                        ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
        } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
-               dma_outb(count & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
-               dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 2) + 2 +
-                        IO_DMA2_BASE);
+               dma_outb(count & 0xff,
+                        ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+               dma_outb((count >> 8) & 0xff,
+                        ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
        } else {
-               dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 +
-                        IO_DMA2_BASE);
-               dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 +
-                        IO_DMA2_BASE);
+               dma_outb((count >> 1) & 0xff,
+                        ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+               dma_outb((count >> 9) & 0xff,
+                        ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
        }
 }
 
+
 /* Get DMA residue count. After a DMA transfer, this
  * should return zero. Reading this while a DMA transfer is
  * still in progress will return unpredictable results.
@@ -340,8 +358,8 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  */
 static __inline__ int get_dma_residue(unsigned int dmanr)
 {
-       unsigned int io_port = (dmanr <= 3) ?
-           ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+       unsigned int io_port = (dmanr <= 3)
+           ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
            : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
 
        /* using short to get 16-bit wrap around */
@@ -352,7 +370,6 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
 
        return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
            ? count : (count << 1);
-
 }
 
 /* These are in kernel/dma.c: */
@@ -367,5 +384,7 @@ extern int isa_dma_bridge_buggy;
 #else
 #define isa_dma_bridge_buggy   (0)
 #endif
-#endif                         /* _ASM_DMA_H */
-#endif                         /* __KERNEL__ */
+
+#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
+
+#endif /* _ASM_POWERPC_DMA_H */
similarity index 87%
rename from include/asm-ppc64/elf.h
rename to include/asm-powerpc/elf.h
index c919a89..d22b100 100644 (file)
@@ -1,10 +1,11 @@
-#ifndef __PPC64_ELF_H
-#define __PPC64_ELF_H
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
 
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/cputable.h>
 #include <asm/auxvec.h>
+#include <asm/page.h>
 
 /* PowerPC relocations defined by the ABIs */
 #define R_PPC_NONE             0
@@ -75,7 +76,7 @@
 #define R_PPC_GOT_DTPREL16_HI  93 /* half16*   (sym+add)@got@dtprel@h */
 #define R_PPC_GOT_DTPREL16_HA  94 /* half16*   (sym+add)@got@dtprel@ha */
 
-/* Keep this the last entry.  */
+/* keep this the last entry. */
 #define R_PPC_NUM              95
 
 /*
@@ -90,8 +91,6 @@
 
 #define ELF_NGREG      48      /* includes nip, msr, lr, etc. */
 #define ELF_NFPREG     33      /* includes fpscr */
-#define ELF_NVRREG32   33      /* includes vscr & vrsave stuffed together */
-#define ELF_NVRREG     34      /* includes vscr & vrsave in split vectors */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
@@ -100,8 +99,21 @@ typedef unsigned int elf_greg_t32;
 typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
 
 /*
- * These are used to set parameters in the core dumps.
+ * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
  */
+#ifdef __powerpc64__
+# define ELF_NVRREG32  33      /* includes vscr & vrsave stuffed together */
+# define ELF_NVRREG    34      /* includes vscr & vrsave in split vectors */
+# define ELF_GREG_TYPE elf_greg_t64
+#else
+# define ELF_NEVRREG   34      /* includes acc (as 2) */
+# define ELF_NVRREG    33      /* includes vscr */
+# define ELF_GREG_TYPE elf_greg_t32
+# define ELF_ARCH      EM_PPC
+# define ELF_CLASS     ELFCLASS32
+# define ELF_DATA      ELFDATA2MSB
+#endif /* __powerpc64__ */
+
 #ifndef ELF_ARCH
 # define ELF_ARCH      EM_PPC64
 # define ELF_CLASS     ELFCLASS64
@@ -114,8 +126,9 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
   typedef elf_greg_t32 elf_greg_t;
   typedef elf_gregset_t32 elf_gregset_t;
 # define elf_addr_t u32
-#endif
+#endif /* ELF_ARCH */
 
+/* Floating point registers */
 typedef double elf_fpreg_t;
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
@@ -125,7 +138,9 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
  * The entry with index 32 contains the vscr as the last word (offset 12) 
  * within the quadword.  This allows the vscr to be stored as either a 
  * quadword (since it must be copied via a vector register to/from storage) 
- * or as a word.  The entry with index 33 contains the vrsave as the first 
+ * or as a word.  
+ *
+ * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first  
  * word (offset 0) within the quadword.
  *
  * This definition of the VMX state is compatible with the current PPC32 
@@ -138,7 +153,9 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
  */
 typedef __vector128 elf_vrreg_t;
 typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
+#ifdef __powerpc64__
 typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
+#endif
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
@@ -146,7 +163,7 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
 #define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
 
 #define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE      4096
+#define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
@@ -158,26 +175,30 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
 #ifdef __KERNEL__
 
 /* Common routine for both 32-bit and 64-bit processes */
-static inline void ppc64_elf_core_copy_regs(elf_gregset_t elf_regs,
+static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
                                            struct pt_regs *regs)
 {
        int i;
-       int gprs = sizeof(struct pt_regs)/sizeof(elf_greg_t64);
+       int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
 
        if (gprs > ELF_NGREG)
                gprs = ELF_NGREG;
 
        for (i=0; i < gprs; i++)
-               elf_regs[i] = (elf_greg_t)((elf_greg_t64 *)regs)[i];
+               elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+
+       memset((char *)(elf_regs) + sizeof(struct pt_regs), 0,          \
+              sizeof(elf_gregset_t) - sizeof(struct pt_regs));
+
 }
-#define ELF_CORE_COPY_REGS(gregs, regs) ppc64_elf_core_copy_regs(gregs, regs);
+#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
 
 static inline int dump_task_regs(struct task_struct *tsk,
                                 elf_gregset_t *elf_regs)
 {
        struct pt_regs *regs = tsk->thread.regs;
        if (regs)
-               ppc64_elf_core_copy_regs(*elf_regs, regs);
+               ppc_elf_core_copy_regs(*elf_regs, regs);
 
        return 1;
 }
@@ -186,15 +207,17 @@ static inline int dump_task_regs(struct task_struct *tsk,
 extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); 
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 
-/* XXX Should we define the XFPREGS using altivec ??? */
-
-#endif
+#endif /* __KERNEL__ */
 
-/* This yields a mask that user programs can use to figure out what
+/* ELF_HWCAP yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
-
-#define ELF_HWCAP      (cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP     (cur_cpu_spec->cpu_user_features)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr)  do {    \
+       _r->gpr[2] = load_addr;                 \
+} while (0)
+#endif /* __powerpc64__ */
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
@@ -205,14 +228,10 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
 
 #define ELF_PLATFORM   (NULL)
 
-#define ELF_PLAT_INIT(_r, load_addr)   do { \
-       memset(_r->gpr, 0, sizeof(_r->gpr)); \
-       _r->ctr = _r->link = _r->xer = _r->ccr = 0; \
-       _r->gpr[2] = load_addr; \
-} while (0)
-
 #ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2)                             \
+
+#ifdef __powerpc64__
+# define SET_PERSONALITY(ex, ibcs2)                            \
 do {                                                           \
        unsigned long new_flags = 0;                            \
        if ((ex).e_ident[EI_CLASS] == ELFCLASS32)               \
@@ -225,7 +244,6 @@ do {                                                                \
        if (personality(current->personality) != PER_LINUX32)   \
                set_personality(PER_LINUX);                     \
 } while (0)
-
 /*
  * An executable for which elf_read_implies_exec() returns TRUE will
  * have the READ_IMPLIES_EXEC personality flag set automatically. This
@@ -233,19 +251,26 @@ do {                                                              \
  * the 64bit ABI has never had these issues dont enable the workaround
  * even if we have an executable stack.
  */
-#define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
+# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
                (exec_stk != EXSTACK_DISABLE_X) : 0)
+#else 
+# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+#endif /* __powerpc64__ */
 
-#endif
+#endif /* __KERNEL__ */
 
 extern int dcache_bsize;
 extern int icache_bsize;
 extern int ucache_bsize;
 
-/* We do have an arch_setup_additional_pages for vDSO matters */
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+#ifdef __powerpc64__
 struct linux_binprm;
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES        /* vDSO has arch_setup_additional_pages */
 extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+#else
+#define VDSO_AUX_ENT(a,b)
+#endif /* __powerpc64__ */
 
 /*
  * The requirements here are:
@@ -265,9 +290,8 @@ do {                                                                        \
        NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);                      \
        NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);                      \
        NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);                      \
-       /* vDSO base */                                                 \
-       NEW_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base);        \
- } while (0)
+       VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base)        \
+} while (0)
 
 /* PowerPC64 relocations defined by the ABIs */
 #define R_PPC64_NONE    R_PPC_NONE
@@ -384,4 +408,4 @@ do {                                                                        \
 /* Keep this the last entry.  */
 #define R_PPC64_NUM            107
 
-#endif /* __PPC64_ELF_H */
+#endif /* _ASM_POWERPC_ELF_H */
similarity index 70%
rename from include/asm-ppc/hardirq.h
rename to include/asm-powerpc/hardirq.h
index 94f1411..3b3e3b4 100644 (file)
@@ -1,11 +1,8 @@
-#ifdef __KERNEL__
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
+#ifndef _ASM_POWERPC_HARDIRQ_H
+#define _ASM_POWERPC_HARDIRQ_H
 
-#include <linux/config.h>
-#include <linux/cache.h>
-#include <linux/smp_lock.h>
 #include <asm/irq.h>
+#include <asm/bug.h>
 
 /* The __last_jiffy_stamp field is needed to ensure that no decrementer
  * interrupt is lost on SMP machines. Since on most CPUs it is in the same
@@ -13,7 +10,7 @@
  * for uniformity.
  */
 typedef struct {
-       unsigned long __softirq_pending;        /* set_bit is used on this */
+       unsigned int __softirq_pending; /* set_bit is used on this */
        unsigned int __last_jiffy_stamp;
 } ____cacheline_aligned irq_cpustat_t;
 
@@ -27,5 +24,4 @@ static inline void ack_bad_irq(int irq)
        BUG();
 }
 
-#endif /* __ASM_HARDIRQ_H */
-#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HARDIRQ_H */
similarity index 64%
rename from include/asm-ppc64/hw_irq.h
rename to include/asm-powerpc/hw_irq.h
index baea40e..c37b31b 100644 (file)
@@ -1,22 +1,17 @@
 /*
  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- *
- * Use inline IRQs where possible - Anton Blanchard <anton@au.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
+#ifndef _ASM_POWERPC_HW_IRQ_H
+#define _ASM_POWERPC_HW_IRQ_H
+
 #ifdef __KERNEL__
-#ifndef _PPC64_HW_IRQ_H
-#define _PPC64_HW_IRQ_H
 
 #include <linux/config.h>
 #include <linux/errno.h>
-#include <asm/irq.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
 
-int timer_interrupt(struct pt_regs *);
+extern void timer_interrupt(struct pt_regs *);
 extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
 
 #ifdef CONFIG_PPC_ISERIES
@@ -33,45 +28,60 @@ extern void local_irq_restore(unsigned long);
 
 #else
 
-#define local_save_flags(flags)        ((flags) = mfmsr())
+#if defined(CONFIG_BOOKE)
+#define SET_MSR_EE(x)  mtmsr(x)
+#define local_irq_restore(flags)       __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#elif defined(__powerpc64__)
+#define SET_MSR_EE(x)  __mtmsrd(x, 1)
 #define local_irq_restore(flags) do { \
        __asm__ __volatile__("": : :"memory"); \
        __mtmsrd((flags), 1); \
 } while(0)
+#else
+#define SET_MSR_EE(x)  mtmsr(x)
+#define local_irq_restore(flags)       mtmsr(flags)
+#endif
 
 static inline void local_irq_disable(void)
 {
+#ifdef CONFIG_BOOKE
+       __asm__ __volatile__("wrteei 0": : :"memory");
+#else
        unsigned long msr;
-       msr = mfmsr();
-       __mtmsrd(msr & ~MSR_EE, 1);
        __asm__ __volatile__("": : :"memory");
+       msr = mfmsr();
+       SET_MSR_EE(msr & ~MSR_EE);
+#endif
 }
 
 static inline void local_irq_enable(void)
 {
+#ifdef CONFIG_BOOKE
+       __asm__ __volatile__("wrteei 1": : :"memory");
+#else
        unsigned long msr;
        __asm__ __volatile__("": : :"memory");
        msr = mfmsr();
-       __mtmsrd(msr | MSR_EE, 1);
+       SET_MSR_EE(msr | MSR_EE);
+#endif
 }
 
-static inline void __do_save_and_cli(unsigned long *flags)
+static inline void local_irq_save_ptr(unsigned long *flags)
 {
        unsigned long msr;
        msr = mfmsr();
        *flags = msr;
-       __mtmsrd(msr & ~MSR_EE, 1);
+#ifdef CONFIG_BOOKE
+       __asm__ __volatile__("wrteei 0": : :"memory");
+#else
+       SET_MSR_EE(msr & ~MSR_EE);
+#endif
        __asm__ __volatile__("": : :"memory");
 }
 
-#define local_irq_save(flags)          __do_save_and_cli(&flags)
-
-#define irqs_disabled()                                \
-({                                             \
-       unsigned long flags;                    \
-       local_save_flags(flags);                \
-       !(flags & MSR_EE);                      \
-})
+#define local_save_flags(flags)        ((flags) = mfmsr())
+#define local_irq_save(flags)  local_irq_save_ptr(&flags)
+#define irqs_disabled()                ((mfmsr() & MSR_EE) == 0)
 
 #endif /* CONFIG_PPC_ISERIES */
 
@@ -99,6 +109,6 @@ static inline void __do_save_and_cli(unsigned long *flags)
  */
 struct hw_interrupt_type;
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-#endif /* _PPC64_HW_IRQ_H */
-#endif /* __KERNEL__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HW_IRQ_H */
similarity index 62%
rename from include/asm-ppc/i8259.h
rename to include/asm-powerpc/i8259.h
index 091b712..9521ad4 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC_KERNEL_i8259_H
-#define _PPC_KERNEL_i8259_H
+#ifndef _ASM_POWERPC_I8259_H
+#define _ASM_POWERPC_I8259_H
 
 #include <linux/irq.h>
 
@@ -8,4 +8,4 @@ extern struct hw_interrupt_type i8259_pic;
 extern void i8259_init(long intack_addr);
 extern int i8259_irq(struct pt_regs *regs);
 
-#endif /* _PPC_KERNEL_i8259_H */
+#endif /* _ASM_POWERPC_I8259_H */
similarity index 75%
rename from include/asm-ppc64/iommu.h
rename to include/asm-powerpc/iommu.h
index c2f3b6e..9d91bdd 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * iommu.h
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  * Rewrite, cleanup:
  * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
@@ -22,6 +21,7 @@
 #ifndef _ASM_IOMMU_H
 #define _ASM_IOMMU_H
 
+#include <linux/config.h>
 #include <asm/types.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
 
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
- * of dma (tce) space we can get.  IOMAP_MAX_ORDER = 13
+ * of dma space we can get.  IOMAP_MAX_ORDER = 13
  * allows up to 2**12 pages (4096 * 4096) = 16 MB
  */
 #define IOMAP_MAX_ORDER 13
 
-/*
- * Tces come in two formats, one for the virtual bus and a different
- * format for PCI
- */
-#define TCE_VB  0
-#define TCE_PCI 1
-
-/* tce_entry
- * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
- * abstracted so layout is irrelevant.
- */
-union tce_entry {
-       unsigned long te_word;
-       struct {
-               unsigned int  tb_cacheBits :6;  /* Cache hash bits - not used */
-               unsigned int  tb_rsvd      :6;
-               unsigned long tb_rpn       :40; /* Real page number */
-               unsigned int  tb_valid     :1;  /* Tce is valid (vb only) */
-               unsigned int  tb_allio     :1;  /* Tce is valid for all lps (vb only) */
-               unsigned int  tb_lpindex   :8;  /* LpIndex for user of TCE (vb only) */
-               unsigned int  tb_pciwr     :1;  /* Write allowed (pci only) */
-               unsigned int  tb_rdwr      :1;  /* Read allowed  (pci), Write allowed (vb) */
-       } te_bits;
-#define te_cacheBits te_bits.tb_cacheBits
-#define te_rpn       te_bits.tb_rpn
-#define te_valid     te_bits.tb_valid
-#define te_allio     te_bits.tb_allio
-#define te_lpindex   te_bits.tb_lpindex
-#define te_pciwr     te_bits.tb_pciwr
-#define te_rdwr      te_bits.tb_rdwr
-};
-
-
 struct iommu_table {
        unsigned long  it_busno;     /* Bus number this table belongs to */
        unsigned long  it_size;      /* Size of iommu table in entries */
@@ -83,6 +50,7 @@ struct iommu_table {
 };
 
 struct scatterlist;
+struct device_node;
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
 
@@ -104,9 +72,8 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn);
 
 #ifdef CONFIG_PPC_ISERIES
 
-struct iSeries_Device_Node;
 /* Creates table for an individual device node */
-extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
+extern void iommu_devnode_init_iSeries(struct device_node *dn);
 
 #endif /* CONFIG_PPC_ISERIES */
 
similarity index 83%
rename from include/asm-ppc/irq.h
rename to include/asm-powerpc/irq.h
index bd96748..07c2b3f 100644 (file)
@@ -1,11 +1,23 @@
 #ifdef __KERNEL__
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
+#ifndef _ASM_POWERPC_IRQ_H
+#define _ASM_POWERPC_IRQ_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
 
 #include <linux/config.h>
-#include <asm/machdep.h>               /* ppc_md */
+#include <linux/threads.h>
+
+#include <asm/types.h>
 #include <asm/atomic.h>
 
+/* this number is used when no interrupt has been assigned */
+#define NO_IRQ                 (-1)
+
 /*
  * These constants are used for passing information about interrupt
  * signal polarity and level/edge sensing to the low-level PIC chip
  */
 #define ARCH_HAS_IRQ_PER_CPU
 
+#define get_irq_desc(irq) (&irq_desc[(irq)])
+
+/* Define a way to iterate across irqs. */
+#define for_each_irq(i) \
+       for ((i) = 0; (i) < NR_IRQS; ++(i))
+
+#ifdef CONFIG_PPC64
+
+/*
+ * Maximum number of interrupt sources that we can handle.
+ */
+#define NR_IRQS                512
+
+/* Interrupt numbers are virtual in case they are sparsely
+ * distributed by the hardware.
+ */
+extern unsigned int virt_irq_to_real_map[NR_IRQS];
+
+/* Create a mapping for a real_irq if it doesn't already exist.
+ * Return the virtual irq as a convenience.
+ */
+int virt_irq_create_mapping(unsigned int real_irq);
+void virt_irq_init(void);
+
+static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
+{
+       return virt_irq_to_real_map[virt_irq];
+}
+
+extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
+
+/*
+ * List of interrupt controllers.
+ */
+#define IC_INVALID    0
+#define IC_OPEN_PIC   1
+#define IC_PPC_XIC    2
+#define IC_BPA_IIC    3
+#define IC_ISERIES    4
+
+extern u64 ppc64_interrupt_controller;
+
+#else /* 32-bit */
+
 #if defined(CONFIG_40x)
 #include <asm/ibm4xx.h>
 
 #define NR_UIC_IRQS UIC_WIDTH
 #define NR_IRQS                ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
 #endif
-static __inline__ int
-irq_canonicalize(int irq)
-{
-       return (irq);
-}
 
 #elif defined(CONFIG_44x)
 #include <asm/ibm44x.h>
@@ -78,12 +129,6 @@ irq_canonicalize(int irq)
 #define        NR_UIC_IRQS     32
 #define        NR_IRQS         ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
 
-static __inline__ int
-irq_canonicalize(int irq)
-{
-       return (irq);
-}
-
 #elif defined(CONFIG_8xx)
 
 /* Now include the board configuration specific associations.
@@ -170,20 +215,9 @@ irq_canonicalize(int irq)
  */
 #define        mk_int_int_mask(IL) (1 << (7 - (IL/2)))
 
-/* always the same on 8xx -- Cort */
-static __inline__ int irq_canonicalize(int irq)
-{
-       return irq;
-}
-
 #elif defined(CONFIG_83xx)
 #include <asm/mpc83xx.h>
 
-static __inline__ int irq_canonicalize(int irq)
-{
-       return irq;
-}
-
 #define        NR_IRQS (NR_IPIC_INTS)
 
 #elif defined(CONFIG_85xx)
@@ -307,17 +341,13 @@ static __inline__ int irq_canonicalize(int irq)
 #define        SIU_INT_PC1             ((uint)0x3e+CPM_IRQ_OFFSET)
 #define        SIU_INT_PC0             ((uint)0x3f+CPM_IRQ_OFFSET)
 
-static __inline__ int irq_canonicalize(int irq)
-{
-       return irq;
-}
-
 #else /* CONFIG_40x + CONFIG_8xx */
 /*
  * this is the # irq's for all ppc arch's (pmac/chrp/prep)
  * so it is the max of them all
  */
 #define NR_IRQS                        256
+#define __DO_IRQ_CANON 1
 
 #ifndef CONFIG_8260
 
@@ -394,25 +424,79 @@ static __inline__ int irq_canonicalize(int irq)
 
 #endif /* CONFIG_8260 */
 
+#endif
+
+#define NR_MASK_WORDS  ((NR_IRQS + 31) / 32)
+/* pedantic: these are long because they are used with set_bit --RR */
+extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+extern atomic_t ppc_n_lost_interrupts;
+
+#endif
+
 /*
- * This gets called from serial.c, which is now used on
- * powermacs as well as prep/chrp boxes.
- * Prep and chrp both have cascaded 8259 PICs.
+ * Because many systems have two overlapping names spaces for
+ * interrupts (ISA and XICS for example), and the ISA interrupts
+ * have historically not been easy to renumber, we allow ISA
+ * interrupts to take values 0 - 15, and shift up the remaining
+ * interrupts by 0x10.
  */
+#define NUM_ISA_INTERRUPTS     0x10
+extern int __irq_offset_value;
+
+static inline int irq_offset_up(int irq)
+{
+       return(irq + __irq_offset_value);
+}
+
+static inline int irq_offset_down(int irq)
+{
+       return(irq - __irq_offset_value);
+}
+
+static inline int irq_offset_value(void)
+{
+       return __irq_offset_value;
+}
+
+#ifdef __DO_IRQ_CANON
+extern int ppc_do_canonicalize_irqs;
+#else
+#define ppc_do_canonicalize_irqs       0
+#endif
+
 static __inline__ int irq_canonicalize(int irq)
 {
-       if (ppc_md.irq_canonicalize)
-               return ppc_md.irq_canonicalize(irq);
+       if (ppc_do_canonicalize_irqs && irq == 2)
+               irq = 9;
        return irq;
 }
 
-#endif
+extern int distribute_irqs;
 
-#define NR_MASK_WORDS  ((NR_IRQS + 31) / 32)
-/* pedantic: these are long because they are used with set_bit --RR */
-extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-extern atomic_t ppc_n_lost_interrupts;
+struct irqaction;
+struct pt_regs;
+
+#ifdef CONFIG_IRQSTACKS
+/*
+ * Per-cpu stacks for handling hard and soft interrupts.
+ */
+extern struct thread_info *hardirq_ctx[NR_CPUS];
+extern struct thread_info *softirq_ctx[NR_CPUS];
+
+extern void irq_ctx_init(void);
+extern void call_do_softirq(struct thread_info *tp);
+extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
+                       struct irqaction *action, struct thread_info *tp);
+
+#define __ARCH_HAS_DO_SOFTIRQ
+
+#else
+#define irq_ctx_init()
+
+#endif /* CONFIG_IRQSTACKS */
+
+extern void do_IRQ(struct pt_regs *regs);
 
 #endif /* _ASM_IRQ_H */
 #endif /* __KERNEL__ */
similarity index 78%
rename from include/asm-ppc64/kdebug.h
rename to include/asm-powerpc/kdebug.h
index d383d16..9dcbac6 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC64_KDEBUG_H
-#define _PPC64_KDEBUG_H 1
+#ifndef _ASM_POWERPC_KDEBUG_H
+#define _ASM_POWERPC_KDEBUG_H
 
 /* nearly identical to x86_64/i386 code */
 
@@ -21,7 +21,7 @@ struct die_args {
    then free.
  */
 int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *ppc64_die_chain;
+extern struct notifier_block *powerpc_die_chain;
 
 /* Grossly misnamed. */
 enum die_val {
@@ -30,14 +30,13 @@ enum die_val {
        DIE_DABR_MATCH,
        DIE_BPT,
        DIE_SSTEP,
-       DIE_GPF,
        DIE_PAGE_FAULT,
 };
 
 static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
 {
        struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
-       return notifier_call_chain(&ppc64_die_chain, val, &args);
+       return notifier_call_chain(&powerpc_die_chain, val, &args);
 }
 
-#endif
+#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/include/asm-powerpc/kmap_types.h b/include/asm-powerpc/kmap_types.h
new file mode 100644 (file)
index 0000000..b6bac6f
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _ASM_POWERPC_KMAP_TYPES_H
+#define _ASM_POWERPC_KMAP_TYPES_H
+
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+enum km_type {
+       KM_BOUNCE_READ,
+       KM_SKB_SUNRPC_DATA,
+       KM_SKB_DATA_SOFTIRQ,
+       KM_USER0,
+       KM_USER1,
+       KM_BIO_SRC_IRQ,
+       KM_BIO_DST_IRQ,
+       KM_PTE0,
+       KM_PTE1,
+       KM_IRQ0,
+       KM_IRQ1,
+       KM_SOFTIRQ0,
+       KM_SOFTIRQ1,
+       KM_PPC_SYNC_PAGE,
+       KM_PPC_SYNC_ICACHE,
+       KM_TYPE_NR
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KMAP_TYPES_H */
similarity index 95%
rename from include/asm-ppc64/kprobes.h
rename to include/asm-powerpc/kprobes.h
index d9129d2..b2f09f1 100644 (file)
@@ -1,8 +1,7 @@
-#ifndef _ASM_KPROBES_H
-#define _ASM_KPROBES_H
+#ifndef _ASM_POWERPC_KPROBES_H
+#define _ASM_POWERPC_KPROBES_H
 /*
  *  Kernel Probes (KProbes)
- *  include/asm-ppc64/kprobes.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -64,4 +63,4 @@ static inline int kprobe_exceptions_notify(struct notifier_block *self,
        return 0;
 }
 #endif
-#endif                         /* _ASM_KPROBES_H */
+#endif /* _ASM_POWERPC_KPROBES_H */
similarity index 97%
rename from include/asm-ppc64/lmb.h
rename to include/asm-powerpc/lmb.h
index de91e03..ea0afe3 100644 (file)
@@ -50,7 +50,7 @@ extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
 extern unsigned long __init lmb_phys_mem_size(void);
 extern unsigned long __init lmb_end_of_DRAM(void);
 extern unsigned long __init lmb_abs_to_phys(unsigned long);
-extern void __init lmb_enforce_memory_limit(void);
+extern void __init lmb_enforce_memory_limit(unsigned long);
 
 extern void lmb_dump_all(void);
 
similarity index 58%
rename from include/asm-ppc64/machdep.h
rename to include/asm-powerpc/machdep.h
index 8027160..b3a93b4 100644 (file)
@@ -1,6 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _PPC64_MACHDEP_H
 #define _PPC64_MACHDEP_H
+#ifdef __KERNEL__
 
 /*
  * This program is free software; you can redistribute it and/or
 
 #include <asm/setup.h>
 
+/* We export this macro for external modules like Alsa to know if
+ * ppc_md.feature_call is implemented or not
+ */
+#define CONFIG_PPC_HAS_FEATURE_CALLS
+
 struct pt_regs;
 struct pci_bus;        
 struct device_node;
@@ -39,6 +44,7 @@ struct smp_ops_t {
 #endif
 
 struct machdep_calls {
+#ifdef CONFIG_PPC64
        void            (*hpte_invalidate)(unsigned long slot,
                                           unsigned long va,
                                           int large,
@@ -56,9 +62,8 @@ struct machdep_calls {
                                       unsigned long vflags,
                                       unsigned long rflags);
        long            (*hpte_remove)(unsigned long hpte_group);
-       void            (*flush_hash_range)(unsigned long context,
-                                           unsigned long number,
-                                           int local);
+       void            (*flush_hash_range)(unsigned long number, int local);
+
        /* special for kexec, to be called in real mode, linar mapping is
         * destroyed as well */
        void            (*hpte_clear_all)(void);
@@ -75,18 +80,21 @@ struct machdep_calls {
        void            (*iommu_dev_setup)(struct pci_dev *dev);
        void            (*iommu_bus_setup)(struct pci_bus *bus);
        void            (*irq_bus_setup)(struct pci_bus *bus);
+#endif
 
        int             (*probe)(int platform);
        void            (*setup_arch)(void);
        void            (*init_early)(void);
        /* Optional, may be NULL. */
-       void            (*get_cpuinfo)(struct seq_file *m);
+       void            (*show_cpuinfo)(struct seq_file *m);
+       void            (*show_percpuinfo)(struct seq_file *m, int i);
 
        void            (*init_IRQ)(void);
        int             (*get_irq)(struct pt_regs *);
        void            (*cpu_irq_down)(int secondary);
 
        /* PCI stuff */
+       /* Called after scanning the bus, before allocating resources */
        void            (*pcibios_fixup)(void);
        int             (*pci_probe_mode)(struct pci_bus *);
 
@@ -96,9 +104,13 @@ struct machdep_calls {
        void            (*panic)(char *str);
        void            (*cpu_die)(void);
 
+       long            (*time_init)(void); /* Optional, may be NULL */
+
        int             (*set_rtc_time)(struct rtc_time *);
        void            (*get_rtc_time)(struct rtc_time *);
-       void            (*get_boot_time)(struct rtc_time *);
+       unsigned long   (*get_boot_time)(void);
+       unsigned char   (*rtc_read_val)(int addr);
+       void            (*rtc_write_val)(int addr, unsigned char val);
 
        void            (*calibrate_decr)(void);
 
@@ -107,10 +119,12 @@ struct machdep_calls {
        /* Interface for platform error logging */
        void            (*log_error)(char *buf, unsigned int err_type, int fatal);
 
+       unsigned char   (*nvram_read_val)(int addr);
+       void            (*nvram_write_val)(int addr, unsigned char val);
        ssize_t         (*nvram_write)(char *buf, size_t count, loff_t *index);
        ssize_t         (*nvram_read)(char *buf, size_t count, loff_t *index);  
        ssize_t         (*nvram_size)(void);            
-       int             (*nvram_sync)(void);
+       void            (*nvram_sync)(void);
 
        /* Exception handlers */
        void            (*system_reset_exception)(struct pt_regs *regs);
@@ -135,14 +149,92 @@ struct machdep_calls {
                                                pgprot_t vma_prot);
 
        /* Idle loop for this platform, leave empty for default idle loop */
-       int             (*idle_loop)(void);
+       void            (*idle_loop)(void);
 
-       /* Function to enable pmcs for this platform, called once per cpu. */
+       /* Function to enable performance monitor counters for this
+          platform, called once per cpu. */
        void            (*enable_pmcs)(void);
+
+#ifdef CONFIG_PPC32    /* XXX for now */
+       /* A general init function, called by ppc_init in init/main.c.
+          May be NULL. */
+       void            (*init)(void);
+
+       void            (*idle)(void);
+       void            (*power_save)(void);
+
+       void            (*heartbeat)(void);
+       unsigned long   heartbeat_reset;
+       unsigned long   heartbeat_count;
+
+       void            (*setup_io_mappings)(void);
+
+       void            (*early_serial_map)(void);
+       void            (*kgdb_map_scc)(void);
+
+       /*
+        * optional PCI "hooks"
+        */
+
+       /* Called after PPC generic resource fixup to perform
+          machine specific fixups */
+       void (*pcibios_fixup_resources)(struct pci_dev *);
+
+       /* Called for each PCI bus in the system when it's probed */
+       void (*pcibios_fixup_bus)(struct pci_bus *);
+
+       /* Called when pci_enable_device() is called (initial=0) or
+        * when a device with no assigned resource is found (initial=1).
+        * Returns 0 to allow assignment/enabling of the device. */
+       int  (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
+
+       /* For interrupt routing */
+       unsigned char (*pci_swizzle)(struct pci_dev *, unsigned char *);
+       int (*pci_map_irq)(struct pci_dev *, unsigned char, unsigned char);
+
+       /* Called in indirect_* to avoid touching devices */
+       int (*pci_exclude_device)(unsigned char, unsigned char);
+
+       /* Called at then very end of pcibios_init() */
+       void (*pcibios_after_init)(void);
+
+       /* this is for modules, since _machine can be a define -- Cort */
+       int ppc_machine;
+
+#ifdef CONFIG_KEXEC
+       /* Called to shutdown machine specific hardware not already controlled
+        * by other drivers.
+        * XXX Should we move this one out of kexec scope?
+        */
+       void (*machine_shutdown)(void);
+
+       /* Called to do the minimal shutdown needed to run a kexec'd kernel
+        * to run successfully.
+        * XXX Should we move this one out of kexec scope?
+        */
+       void (*machine_crash_shutdown)(void);
+
+       /* Called to do what every setup is needed on image and the
+        * reboot code buffer. Returns 0 on success.
+        * Provide your own (maybe dummy) implementation if your platform
+        * claims to support kexec.
+        */
+       int (*machine_kexec_prepare)(struct kimage *image);
+
+       /* Called to handle any machine specific cleanup on image */
+       void (*machine_kexec_cleanup)(struct kimage *image);
+
+       /* Called to perform the _real_ kexec.
+        * Do NOT allocate memory or fail here. We are past the point of
+        * no return.
+        */
+       void (*machine_kexec)(struct kimage *image);
+#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_PPC32 */
 };
 
-extern int default_idle(void);
-extern int native_idle(void);
+extern void default_idle(void);
+extern void native_idle(void);
 
 extern struct machdep_calls ppc_md;
 extern char cmd_line[COMMAND_LINE_SIZE];
@@ -162,6 +254,13 @@ extern sys_ctrler_t sys_ctrler;
 
 #endif /* CONFIG_PPC_PMAC */
 
+extern void setup_pci_ptrs(void);
+
+#ifdef CONFIG_SMP
+/* Poor default implementations */
+extern void __devinit smp_generic_give_timebase(void);
+extern void __devinit smp_generic_take_timebase(void);
+#endif /* CONFIG_SMP */
 
 
 /* Functions to produce codes on the leds.
@@ -181,5 +280,5 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
                ppc_md.log_error(buf, err_type, fatal);
 }
 
-#endif /* _PPC64_MACHDEP_H */
 #endif /* __KERNEL__ */
+#endif /* _PPC64_MACHDEP_H */
similarity index 95%
rename from arch/ppc64/kernel/mpic.h
rename to include/asm-powerpc/mpic.h
index ca78a7f..7083d1f 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+
 #include <linux/irq.h>
 
 /*
@@ -258,12 +261,21 @@ extern void mpic_setup_this_cpu(void);
 /* Clean up for kexec (or cpu offline or ...) */
 extern void mpic_teardown_this_cpu(int secondary);
 
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
 /* Request IPIs on primary mpic */
 extern void mpic_request_ipis(void);
 
 /* Send an IPI (non offseted number 0..3) */
 extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
 
+/* Send a message (IPI) to a given target (cpu number or MSG_*) */
+void smp_mpic_message_pass(int target, int msg);
+
 /* Fetch interrupt from a given mpic */
 extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
 /* This one gets to the primary mpic */
@@ -271,3 +283,5 @@ extern int mpic_get_irq(struct pt_regs *regs);
 
 /* global mpic for pSeries */
 extern struct mpic *pSeries_mpic;
+
+#endif /* _ASM_POWERPC_MPIC_H */
similarity index 94%
rename from include/asm-ppc/of_device.h
rename to include/asm-powerpc/of_device.h
index 575bce4..ddb16aa 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __OF_DEVICE_H__
-#define __OF_DEVICE_H__
+#ifndef _ASM_POWERPC_OF_DEVICE_H
+#define _ASM_POWERPC_OF_DEVICE_H
 
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
@@ -61,5 +61,4 @@ extern struct of_device *of_platform_device_create(struct device_node *np,
                                                   struct device *parent);
 extern void of_release_dev(struct device *dev);
 
-#endif /* __OF_DEVICE_H__ */
-
+#endif /* _ASM_POWERPC_OF_DEVICE_H */
similarity index 83%
rename from include/asm-ppc64/oprofile_impl.h
rename to include/asm-powerpc/oprofile_impl.h
index b04f1df..8013cd2 100644 (file)
@@ -9,39 +9,49 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
+#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
+#define _ASM_POWERPC_OPROFILE_IMPL_H
 
 #define OP_MAX_COUNTER 8
 
 /* Per-counter configuration as set via oprofilefs.  */
 struct op_counter_config {
+#ifdef __powerpc64__
        unsigned long valid;
+#endif
        unsigned long enabled;
        unsigned long event;
        unsigned long count;
        unsigned long kernel;
+#ifdef __powerpc64__
        /* We dont support per counter user/kernel selection */
+#endif
        unsigned long user;
        unsigned long unit_mask;
 };
 
 /* System-wide configuration as set via oprofilefs.  */
 struct op_system_config {
+#ifdef __powerpc64__
        unsigned long mmcr0;
        unsigned long mmcr1;
        unsigned long mmcra;
+#endif
        unsigned long enable_kernel;
        unsigned long enable_user;
+#ifdef __powerpc64__
        unsigned long backtrace_spinlocks;
+#endif
 };
 
 /* Per-arch configuration */
-struct op_ppc64_model {
+struct op_powerpc_model {
        void (*reg_setup) (struct op_counter_config *,
                           struct op_system_config *,
                           int num_counters);
+#ifdef __powerpc64__
        void (*cpu_setup) (void *);
+#endif
        void (*start) (struct op_counter_config *);
        void (*stop) (void);
        void (*handle_interrupt) (struct pt_regs *,
@@ -49,8 +59,9 @@ struct op_ppc64_model {
        int num_counters;
 };
 
-extern struct op_ppc64_model op_model_rs64;
-extern struct op_ppc64_model op_model_power4;
+#ifdef __powerpc64__
+extern struct op_powerpc_model op_model_rs64;
+extern struct op_powerpc_model op_model_power4;
 
 static inline unsigned int ctr_read(unsigned int i)
 {
@@ -107,5 +118,6 @@ static inline void ctr_write(unsigned int i, unsigned int val)
                break;
        }
 }
+#endif /* __powerpc64__ */
 
-#endif
+#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
similarity index 66%
rename from include/asm-ppc64/pmc.h
rename to include/asm-powerpc/pmc.h
index d1d297d..2f3c3fc 100644 (file)
@@ -16,8 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _PPC64_PMC_H
-#define _PPC64_PMC_H
+#ifndef _POWERPC_PMC_H
+#define _POWERPC_PMC_H
 
 #include <asm/ptrace.h>
 
@@ -26,6 +26,21 @@ typedef void (*perf_irq_t)(struct pt_regs *);
 int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
 
+#ifdef CONFIG_PPC64
 void power4_enable_pmcs(void);
+#endif
 
-#endif /* _PPC64_PMC_H */
+#ifdef CONFIG_FSL_BOOKE
+void init_pmc_stop(int ctr);
+void set_pmc_event(int ctr, int event);
+void set_pmc_user_kernel(int ctr, int user, int kernel);
+void set_pmc_marked(int ctr, int mark0, int mark1);
+void pmc_start_ctr(int ctr, int enable);
+void pmc_start_ctrs(int enable);
+void pmc_stop_ctrs(void);
+void dump_pmcs(void);
+
+extern struct op_powerpc_model op_model_fsl_booke;
+#endif
+
+#endif /* _POWERPC_PMC_H */
similarity index 88%
rename from include/asm-ppc64/posix_types.h
rename to include/asm-powerpc/posix_types.h
index 516de72..c639107 100644 (file)
@@ -1,44 +1,54 @@
-#ifndef _PPC64_POSIX_TYPES_H
-#define _PPC64_POSIX_TYPES_H
+#ifndef _ASM_POWERPC_POSIX_TYPES_H
+#define _ASM_POWERPC_POSIX_TYPES_H
 
 /*
  * This file is generally used by user-level software, so you need to
  * be a little careful about namespace pollution etc.  Also, we cannot
  * assume GCC is being used.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 typedef unsigned long  __kernel_ino_t;
-typedef unsigned long          __kernel_nlink_t;
 typedef unsigned int   __kernel_mode_t;
 typedef long           __kernel_off_t;
-typedef long long      __kernel_loff_t;
 typedef int            __kernel_pid_t;
-typedef int             __kernel_ipc_pid_t;
 typedef unsigned int   __kernel_uid_t;
 typedef unsigned int   __kernel_gid_t;
-typedef unsigned long  __kernel_size_t;
-typedef long           __kernel_ssize_t;
 typedef long           __kernel_ptrdiff_t;
 typedef long           __kernel_time_t;
+typedef long           __kernel_clock_t;
 typedef int            __kernel_timer_t;
 typedef int            __kernel_clockid_t;
 typedef long           __kernel_suseconds_t;
-typedef long           __kernel_clock_t;
 typedef int            __kernel_daddr_t;
 typedef char *         __kernel_caddr_t;
 typedef unsigned short __kernel_uid16_t;
 typedef unsigned short __kernel_gid16_t;
 typedef unsigned int   __kernel_uid32_t;
 typedef unsigned int   __kernel_gid32_t;
-
 typedef unsigned int   __kernel_old_uid_t;
 typedef unsigned int   __kernel_old_gid_t;
+
+#ifdef __powerpc64__
+typedef unsigned long          __kernel_nlink_t;
+typedef int             __kernel_ipc_pid_t;
+typedef unsigned long  __kernel_size_t;
+typedef long           __kernel_ssize_t;
 typedef unsigned long  __kernel_old_dev_t;
+#else
+typedef unsigned short __kernel_nlink_t;
+typedef short          __kernel_ipc_pid_t;
+typedef unsigned int   __kernel_size_t;
+typedef int            __kernel_ssize_t;
+typedef unsigned int   __kernel_old_dev_t;
+#endif
+
+#ifdef __powerpc64__
+typedef long long      __kernel_loff_t;
+#else
+#ifdef __GNUC__
+typedef long long      __kernel_loff_t;
+#endif
+#endif
 
 typedef struct {
        int     val[2];
@@ -116,4 +126,4 @@ static __inline__ void __FD_ZERO(__kernel_fd_set *p)
 
 #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
 #endif /* __GNUC__ */
-#endif /* _PPC64_POSIX_TYPES_H */
+#endif /* _ASM_POWERPC_POSIX_TYPES_H */
similarity index 94%
rename from arch/ppc64/kernel/pci.h
rename to include/asm-powerpc/ppc-pci.h
index 5eb2cc3..a88728f 100644 (file)
@@ -6,8 +6,8 @@
  *      as published by the Free Software Foundation; either version
  *      2 of the License, or (at your option) any later version.
  */
-#ifndef __PPC_KERNEL_PCI_H__
-#define __PPC_KERNEL_PCI_H__
+#ifndef _ASM_POWERPC_PPC_PCI_H
+#define _ASM_POWERPC_PPC_PCI_H
 
 #include <linux/pci.h>
 #include <asm/pci-bridge.h>
@@ -51,4 +51,4 @@ extern unsigned long pci_probe_only;
 extern unsigned long pci_assign_all_buses;
 extern int pci_read_irq_line(struct pci_dev *pci_dev);
 
-#endif /* __PPC_KERNEL_PCI_H__ */
+#endif /* _ASM_POWERPC_PPC_PCI_H */
similarity index 54%
rename from include/asm-ppc/ppc_asm.h
rename to include/asm-powerpc/ppc_asm.h
index bb53e2d..96367e0 100644 (file)
@@ -1,38 +1,42 @@
 /*
- * include/asm-ppc/ppc_asm.h
- *
- * Definitions used by various bits of low-level assembly code on PowerPC.
- *
  * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
  */
+#ifndef _ASM_POWERPC_PPC_ASM_H
+#define _ASM_POWERPC_PPC_ASM_H
 
+#include <linux/stringify.h>
 #include <linux/config.h>
 
+#ifdef __ASSEMBLY__
+
 /*
  * Macros for storing registers into and loading registers from
  * exception frames.
  */
+#ifdef __powerpc64__
+#define SAVE_GPR(n, base)      std     n,GPR0+8*(n)(base)
+#define REST_GPR(n, base)      ld      n,GPR0+8*(n)(base)
+#define SAVE_NVGPRS(base)      SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base)      REST_8GPRS(14, base); REST_10GPRS(22, base)
+#else
 #define SAVE_GPR(n, base)      stw     n,GPR0+4*(n)(base)
+#define REST_GPR(n, base)      lwz     n,GPR0+4*(n)(base)
+#define SAVE_NVGPRS(base)      SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
+                               SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base)      REST_GPR(13, base); REST_8GPRS(14, base); \
+                               REST_10GPRS(22, base)
+#endif
+
+
 #define SAVE_2GPRS(n, base)    SAVE_GPR(n, base); SAVE_GPR(n+1, base)
 #define SAVE_4GPRS(n, base)    SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
 #define SAVE_8GPRS(n, base)    SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
 #define SAVE_10GPRS(n, base)   SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_GPR(n, base)      lwz     n,GPR0+4*(n)(base)
 #define REST_2GPRS(n, base)    REST_GPR(n, base); REST_GPR(n+1, base)
 #define REST_4GPRS(n, base)    REST_2GPRS(n, base); REST_2GPRS(n+2, base)
 #define REST_8GPRS(n, base)    REST_4GPRS(n, base); REST_4GPRS(n+4, base)
 #define REST_10GPRS(n, base)   REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
-#define SAVE_NVGPRS(base)      SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
-                               SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base)      REST_GPR(13, base); REST_8GPRS(14, base); \
-                               REST_10GPRS(22, base)
-
 #define SAVE_FPR(n, base)      stfd    n,THREAD_FPR0+8*(n)(base)
 #define SAVE_2FPRS(n, base)    SAVE_FPR(n, base); SAVE_FPR(n+1, base)
 #define SAVE_4FPRS(n, base)    SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
 #define REST_32FPRS(n, base)   REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
 #define SAVE_VR(n,b,base)      li b,THREAD_VR0+(16*(n));  stvx n,b,base
-#define SAVE_2VR(n,b,base)     SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
-#define SAVE_4VR(n,b,base)     SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base)
-#define SAVE_8VR(n,b,base)     SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base)
-#define SAVE_16VR(n,b,base)    SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base)
-#define SAVE_32VR(n,b,base)    SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base)
+#define SAVE_2VRS(n,b,base)    SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VRS(n,b,base)    SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
+#define SAVE_8VRS(n,b,base)    SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
+#define SAVE_16VRS(n,b,base)   SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
+#define SAVE_32VRS(n,b,base)   SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
 #define REST_VR(n,b,base)      li b,THREAD_VR0+(16*(n)); lvx n,b,base
-#define REST_2VR(n,b,base)     REST_VR(n,b,base); REST_VR(n+1,b,base)
-#define REST_4VR(n,b,base)     REST_2VR(n,b,base); REST_2VR(n+2,b,base)
-#define REST_8VR(n,b,base)     REST_4VR(n,b,base); REST_4VR(n+4,b,base)
-#define REST_16VR(n,b,base)    REST_8VR(n,b,base); REST_8VR(n+8,b,base)
-#define REST_32VR(n,b,base)    REST_16VR(n,b,base); REST_16VR(n+16,b,base)
+#define REST_2VRS(n,b,base)    REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VRS(n,b,base)    REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
+#define REST_8VRS(n,b,base)    REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
+#define REST_16VRS(n,b,base)   REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
+#define REST_32VRS(n,b,base)   REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
 #define SAVE_EVR(n,s,base)     evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVR(n,s,base)    SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVR(n,s,base)    SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base)
-#define SAVE_8EVR(n,s,base)    SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base)
-#define SAVE_16EVR(n,s,base)   SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base)
-#define SAVE_32EVR(n,s,base)   SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base)
-
+#define SAVE_2EVRS(n,s,base)   SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
+#define SAVE_4EVRS(n,s,base)   SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
+#define SAVE_8EVRS(n,s,base)   SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
+#define SAVE_16EVRS(n,s,base)  SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
+#define SAVE_32EVRS(n,s,base)  SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
 #define REST_EVR(n,s,base)     lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVR(n,s,base)    REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVR(n,s,base)    REST_2EVR(n,s,base); REST_2EVR(n+2,s,base)
-#define REST_8EVR(n,s,base)    REST_4EVR(n,s,base); REST_4EVR(n+4,s,base)
-#define REST_16EVR(n,s,base)   REST_8EVR(n,s,base); REST_8EVR(n+8,s,base)
-#define REST_32EVR(n,s,base)   REST_16EVR(n,s,base); REST_16EVR(n+16,s,base)
+#define REST_2EVRS(n,s,base)   REST_EVR(n,s,base); REST_EVR(n+1,s,base)
+#define REST_4EVRS(n,s,base)   REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
+#define REST_8EVRS(n,s,base)   REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
+#define REST_16EVRS(n,s,base)  REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
+#define REST_32EVRS(n,s,base)  REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+
+/* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_VERY_LOW    or   31,31,31  # very low priority\n"
+#define HMT_LOW                or 1,1,1
+#define HMT_MEDIUM_LOW  or   6,6,6     # medium low priority\n"
+#define HMT_MEDIUM     or 2,2,2
+#define HMT_MEDIUM_HIGH or   5,5,5     # medium high priority\n"
+#define HMT_HIGH       or 3,3,3
+
+/* handle instructions that older assemblers may not know */
+#define RFCI           .long 0x4c000066        /* rfci instruction */
+#define RFDI           .long 0x4c00004e        /* rfdi instruction */
+#define RFMCI          .long 0x4c00004c        /* rfmci instruction */
+
+#ifdef CONFIG_PPC64
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
+       .section ".text"; \
+       .align 2 ; \
+       .globl name; \
+       .globl GLUE(.,name); \
+       .section ".opd","aw"; \
+name: \
+       .quad GLUE(.,name); \
+       .quad .TOC.@tocbase; \
+       .quad 0; \
+       .previous; \
+       .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _KPROBE(name) \
+       .section ".kprobes.text","a"; \
+       .align 2 ; \
+       .globl name; \
+       .globl GLUE(.,name); \
+       .section ".opd","aw"; \
+name: \
+       .quad GLUE(.,name); \
+       .quad .TOC.@tocbase; \
+       .quad 0; \
+       .previous; \
+       .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _STATIC(name) \
+       .section ".text"; \
+       .align 2 ; \
+       .section ".opd","aw"; \
+name: \
+       .quad GLUE(.,name); \
+       .quad .TOC.@tocbase; \
+       .quad 0; \
+       .previous; \
+       .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#else /* 32-bit */
+
+#define _GLOBAL(n)     \
+       .text;          \
+       .stabs __stringify(n:F-1),N_FUN,0,0,n;\
+       .globl n;       \
+n:
+
+#define _KPROBE(n)     \
+       .section ".kprobes.text","a";   \
+       .globl  n;      \
+n:
+
+#endif
+
+/* 
+ * LOADADDR( rn, name )
+ *   loads the address of 'name' into 'rn'
+ *
+ * LOADBASE( rn, name )
+ *   loads the address (less the low 16 bits) of 'name' into 'rn'
+ *   suitable for base+disp addressing
+ */
+#ifdef __powerpc64__
+#define LOADADDR(rn,name) \
+       lis     rn,name##@highest;      \
+       ori     rn,rn,name##@higher;    \
+       rldicr  rn,rn,32,31;            \
+       oris    rn,rn,name##@h;         \
+       ori     rn,rn,name##@l
+
+#define LOADBASE(rn,name)              \
+       .section .toc,"aw";             \
+1:     .tc     name[TC],name;          \
+       .previous;                      \
+       ld      rn,1b@toc(r2)
+
+#define OFF(name)      0
+
+#define SET_REG_TO_CONST(reg, value)                   \
+       lis     reg,(((value)>>48)&0xFFFF);             \
+       ori     reg,reg,(((value)>>32)&0xFFFF);         \
+       rldicr  reg,reg,32,31;                          \
+       oris    reg,reg,(((value)>>16)&0xFFFF);         \
+       ori     reg,reg,((value)&0xFFFF);
+
+#define SET_REG_TO_LABEL(reg, label)                   \
+       lis     reg,(label)@highest;                    \
+       ori     reg,reg,(label)@higher;                 \
+       rldicr  reg,reg,32,31;                          \
+       oris    reg,reg,(label)@h;                      \
+       ori     reg,reg,(label)@l;
+
+/* operations for longs and pointers */
+#define LDL    ld
+#define STL    std
+#define CMPI   cmpdi
+
+#else /* 32-bit */
+#define LOADADDR(rn,name) \
+       lis     rn,name@ha;     \
+       addi    rn,rn,name@l
+
+#define LOADBASE(rn,name)      \
+       lis     rn,name@ha
+
+#define OFF(name)      name@l
+
+/* operations for longs and pointers */
+#define LDL    lwz
+#define STL    stw
+#define CMPI   cmpwi
 
+#endif
+
+/* various errata or part fixups */
 #ifdef CONFIG_PPC601_SYNC_FIX
 #define SYNC                           \
 BEGIN_FTR_SECTION                      \
@@ -93,6 +230,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
 #define ISYNC_601
 #endif
 
+
 #ifndef CONFIG_SMP
 #define TLBSYNC
 #else /* CONFIG_SMP */
@@ -104,6 +242,7 @@ BEGIN_FTR_SECTION                   \
 END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #endif
 
+       
 /*
  * This instruction is not implemented on the PPC 603 or 601; however, on
  * the 403GCX and 405GP tlbia IS defined and tlbie is not.
@@ -121,14 +260,44 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
        bdnz    0b
 #endif
 
-#ifdef CONFIG_BOOKE
+
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb)    dcbt    ra, rb;
+#define        PPC405_ERR77_SYNC       sync;
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+
+
+#ifdef CONFIG_IBM440EP_ERR42
+#define PPC440EP_ERR42 isync
+#else
+#define PPC440EP_ERR42
+#endif
+
+
+#if defined(CONFIG_BOOKE)
 #define tophys(rd,rs)                          \
        addis   rd,rs,0
 
 #define tovirt(rd,rs)                          \
        addis   rd,rs,0
 
-#else  /* CONFIG_BOOKE */
+#elif defined(CONFIG_PPC64)
+/* PPPBBB - DRENG  If KERNELBASE is always 0xC0...,
+ * Then we can easily do this with one asm insn. -Peter
+ */
+#define tophys(rd,rs)                           \
+        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
+        rldicr  rd,rd,32,31;                    \
+        sub     rd,rs,rd
+
+#define tovirt(rd,rs)                           \
+        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
+        rldicr  rd,rd,32,31;                    \
+        add     rd,rs,rd
+#else
 /*
  * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
  * physical base address of RAM at compile time.
@@ -146,22 +315,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
        .align  1;                              \
        .long   0b;                             \
        .previous
-#endif  /* CONFIG_BOOKE */
+#endif
 
-/*
- * On 64-bit cpus, we use the rfid instruction instead of rfi, but
- * we then have to make sure we preserve the top 32 bits except for
- * the 64-bit mode bit, which we clear.
- */
-#ifdef CONFIG_PPC64BRIDGE
-#define        FIX_SRR1(ra, rb)        \
-       mr      rb,ra;          \
-       mfmsr   ra;             \
-       clrldi  ra,ra,1;                /* turn off 64-bit mode */ \
-       rldimi  ra,rb,0,32
-#define        RFI             .long   0x4c000024      /* rfid instruction */
-#define MTMSRD(r)      .long   (0x7c000164 + ((r) << 21))      /* mtmsrd */
-#define CLR_TOP32(r)   rlwinm  (r),(r),0,0,31  /* clear top 32 bits */
+#ifdef CONFIG_PPC64
+#define RFI            rfid
+#define MTMSRD(r)      mtmsrd  r
 
 #else
 #define FIX_SRR1(ra, rb)
@@ -172,24 +330,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #endif
 #define MTMSRD(r)      mtmsr   r
 #define CLR_TOP32(r)
-#endif /* CONFIG_PPC64BRIDGE */
-
-#define RFCI           .long 0x4c000066        /* rfci instruction */
-#define RFDI           .long 0x4c00004e        /* rfdi instruction */
-#define RFMCI          .long 0x4c00004c        /* rfmci instruction */
-
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb)    dcbt    ra, rb;
-#define        PPC405_ERR77_SYNC       sync;
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-
-#ifdef CONFIG_IBM440EP_ERR42
-#define PPC440EP_ERR42 isync
-#else
-#define PPC440EP_ERR42
 #endif
 
 /* The boring bits... */
@@ -277,6 +417,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #define        fr30    30
 #define        fr31    31
 
+/* AltiVec Registers (VPRs) */
+
 #define        vr0     0
 #define        vr1     1
 #define        vr2     2
@@ -310,6 +452,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #define        vr30    30
 #define        vr31    31
 
+/* SPE Registers (EVPRs) */
+
 #define        evr0    0
 #define        evr1    1
 #define        evr2    2
@@ -348,3 +492,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #define N_RSYM 64
 #define N_SLINE        68
 #define N_SO   100
+
+#define ASM_CONST(x) x
+#else
+  #define __ASM_CONST(x) x##UL
+  #define ASM_CONST(x) __ASM_CONST(x)
+#endif /*  __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PPC_ASM_H */
similarity index 51%
rename from include/asm-ppc/processor.h
rename to include/asm-powerpc/processor.h
index b05b5d9..9592f53 100644 (file)
@@ -1,21 +1,28 @@
-#ifdef __KERNEL__
-#ifndef __ASM_PPC_PROCESSOR_H
-#define __ASM_PPC_PROCESSOR_H
+#ifndef _ASM_POWERPC_PROCESSOR_H
+#define _ASM_POWERPC_PROCESSOR_H
 
 /*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
  */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
 
 #include <linux/config.h>
-#include <linux/stringify.h>
+#include <asm/reg.h>
 
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
-#include <asm/mpc8xx.h>
-#include <asm/reg.h>
+#ifdef CONFIG_PPC64
+#include <asm/systemcfg.h>
+#endif
 
+#ifdef CONFIG_PPC32
+/* 32-bit platform types */
 /* We only need to define a new _MACH_xxx for machines which are part of
  * a configuration which supports more than one type of different machine.
  * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
 #define _CHRP_IBM      0x05    /* IBM chrp, the longtrail and longtrail 2 */
 #define _CHRP_Pegasos  0x06    /* Genesi/bplan's Pegasos and Pegasos2 */
 
-#define _GLOBAL(n)\
-       .stabs __stringify(n:F-1),N_FUN,0,0,n;\
-       .globl n;\
-n:
-
-/*
- * this is the minimum allowable io space due to the location
- * of the io areas on prep (first one at 0x80000000) but
- * as soon as I get around to remapping the io areas with the BATs
- * to match the mac we can raise this. -- Cort
- */
-#define TASK_SIZE      (CONFIG_TASK_SIZE)
-
-#ifndef __ASSEMBLY__
 #ifdef CONFIG_PPC_MULTIPLATFORM
 extern int _machine;
 
@@ -67,17 +60,49 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
 #else
 #define _machine 0
 #endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+/* Platforms supported by PPC64 */
+#define PLATFORM_PSERIES      0x0100
+#define PLATFORM_PSERIES_LPAR 0x0101
+#define PLATFORM_ISERIES_LPAR 0x0201
+#define PLATFORM_LPAR         0x0001
+#define PLATFORM_POWERMAC     0x0400
+#define PLATFORM_MAPLE        0x0500
+#define PLATFORM_BPA          0x1000
+
+/* Compatibility with drivers coming from PPC32 world */
+#define _machine       (systemcfg->platform)
+#define _MACH_Pmac     PLATFORM_POWERMAC
+#endif
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low()   asm volatile("or 31,31,31   # very low priority")
+#define HMT_low()       asm volatile("or 1,1,1      # low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6      # medium low priority")
+#define HMT_medium()    asm volatile("or 2,2,2      # medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
+#define HMT_high()      asm volatile("or 3,3,3      # high priority")
+
+#ifdef __KERNEL__
+
+extern int have_of;
 
 struct task_struct;
-void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
 void release_thread(struct task_struct *);
 
 /* Prepare to copy thread state - unlazy all lazy status */
 extern void prepare_to_copy(struct task_struct *tsk);
 
-/*
- * Create a new kernel thread.
- */
+/* Create a new kernel thread. */
 extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
 /* Lazy FPU handling on uni-processor */
@@ -85,10 +110,37 @@ extern struct task_struct *last_task_used_math;
 extern struct task_struct *last_task_used_altivec;
 extern struct task_struct *last_task_used_spe;
 
+#ifdef CONFIG_PPC32
+#define TASK_SIZE      (CONFIG_TASK_SIZE)
+
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE     (TASK_SIZE / 8 * 3)
+#endif
+
+#ifdef CONFIG_PPC64
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
+
+/* 
+ * 32-bit user address space is 4GB - 1 page 
+ * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
+
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+               TASK_SIZE_USER32 : TASK_SIZE_USER64)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
+
+#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
+               TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
+#endif
 
 typedef struct {
        unsigned long seg;
@@ -96,18 +148,31 @@ typedef struct {
 
 struct thread_struct {
        unsigned long   ksp;            /* Kernel stack pointer */
+#ifdef CONFIG_PPC64
+       unsigned long   ksp_vsid;
+#endif
        struct pt_regs  *regs;          /* Pointer to saved register state */
        mm_segment_t    fs;             /* for get_fs() validation */
+#ifdef CONFIG_PPC32
        void            *pgdir;         /* root of page-table tree */
-       int             fpexc_mode;     /* floating-point exception mode */
        signed long     last_syscall;
+#endif
 #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
        unsigned long   dbcr0;          /* debug control register values */
        unsigned long   dbcr1;
 #endif
        double          fpr[32];        /* Complete floating point set */
+#ifdef CONFIG_PPC32
        unsigned long   fpscr_pad;      /* fpr ... fpscr must be contiguous */
+#endif
        unsigned long   fpscr;          /* Floating point status */
+       int             fpexc_mode;     /* floating-point exception mode */
+#ifdef CONFIG_PPC64
+       unsigned long   start_tb;       /* Start purr when proc switched in */
+       unsigned long   accum_tb;       /* Total accumilated purr for process */
+       unsigned long   vdso_base;      /* base of the vDSO library */
+#endif
+       unsigned long   dabr;           /* Data address breakpoint register */
 #ifdef CONFIG_ALTIVEC
        /* Complete AltiVec register set */
        vector128       vr[32] __attribute((aligned(16)));
@@ -128,51 +193,58 @@ struct thread_struct {
 
 #define INIT_SP                (sizeof(init_stack) + (unsigned long) &init_stack)
 
+
+#ifdef CONFIG_PPC32
 #define INIT_THREAD { \
        .ksp = INIT_SP, \
        .fs = KERNEL_DS, \
        .pgdir = swapper_pg_dir, \
        .fpexc_mode = MSR_FE0 | MSR_FE1, \
 }
+#else
+#define INIT_THREAD  { \
+       .ksp = INIT_SP, \
+       .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
+       .fs = KERNEL_DS, \
+       .fpr = {0}, \
+       .fpscr = 0, \
+       .fpexc_mode = MSR_FE0|MSR_FE1, \
+}
+#endif
 
 /*
  * Return saved PC of a blocked thread. For now, this is the "user" PC
  */
-#define thread_saved_pc(tsk)   \
-       ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define thread_saved_pc(tsk)    \
+        ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
 
 /* Get/set floating-point exception mode */
-#define GET_FPEXC_CTL(tsk, adr)        get_fpexc_mode((tsk), (adr))
-#define SET_FPEXC_CTL(tsk, val)        set_fpexc_mode((tsk), (val))
+#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
+#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
 
 extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
 extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
 
-static inline unsigned int __unpack_fe01(unsigned int msr_bits)
+static inline unsigned int __unpack_fe01(unsigned long msr_bits)
 {
        return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
 }
 
-static inline unsigned int __pack_fe01(unsigned int fpmode)
+static inline unsigned long __pack_fe01(unsigned int fpmode)
 {
        return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
 }
 
-/* in process.c - for early bootup debug -- Cort */
-int ll_printk(const char *, ...);
-void ll_puts(const char *);
-
-/* In misc.c */
-void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-
-#define have_of (_machine == _MACH_chrp || _machine == _MACH_Pmac)
-
+#ifdef CONFIG_PPC64
+#define cpu_relax()    do { HMT_low(); HMT_medium(); barrier(); } while (0)
+#else
 #define cpu_relax()    barrier()
+#endif
 
 /*
  * Prefetch macros.
@@ -181,21 +253,28 @@ void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
-        __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+       if (unlikely(!x))
+               return;
+
+       __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
 }
 
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
-        __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+       if (unlikely(!x))
+               return;
+
+       __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
 }
 
 #define spin_lock_prefetch(x)  prefetchw(x)
 
-extern int emulate_altivec(struct pt_regs *regs);
-
-#endif /* !__ASSEMBLY__ */
+#ifdef CONFIG_PPC64
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#endif
 
-#endif /* __ASM_PPC_PROCESSOR_H */
 #endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
new file mode 100644 (file)
index 0000000..8a21791
--- /dev/null
@@ -0,0 +1,219 @@
+#ifndef _POWERPC_PROM_H
+#define _POWERPC_PROM_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <asm/atomic.h>
+
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER           0xd00dfeed      /* marker */
+#define OF_DT_BEGIN_NODE       0x1             /* Start of node, full name */
+#define OF_DT_END_NODE         0x2             /* End node */
+#define OF_DT_PROP             0x3             /* Property: name off, size,
+                                                * content */
+#define OF_DT_NOP              0x4             /* nop */
+#define OF_DT_END              0x9
+
+#define OF_DT_VERSION          0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full pathes and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names, and is fully page aligned and
+ * self contained in a page, so that it can be kept around by the kernel,
+ * each property name appears only once in this page (cheap compression)
+ *
+ * the mem_rsvmap contains a map of reserved ranges of physical memory,
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0
+ */
+struct boot_param_header
+{
+       u32     magic;                  /* magic word OF_DT_HEADER */
+       u32     totalsize;              /* total size of DT block */
+       u32     off_dt_struct;          /* offset to structure */
+       u32     off_dt_strings;         /* offset to strings */
+       u32     off_mem_rsvmap;         /* offset to memory reserve map */
+       u32     version;                /* format version */
+       u32     last_comp_version;      /* last compatible version */
+       /* version 2 fields below */
+       u32     boot_cpuid_phys;        /* Physical CPU id we're booting on */
+       /* version 3 fields below */
+       u32     dt_strings_size;        /* size of the DT strings block */
+};
+
+
+
+typedef u32 phandle;
+typedef u32 ihandle;
+
+struct address_range {
+       unsigned long space;
+       unsigned long address;
+       unsigned long size;
+};
+
+struct interrupt_info {
+       int     line;
+       int     sense;          /* +ve/-ve logic, edge or level, etc. */
+};
+
+struct pci_address {
+       u32 a_hi;
+       u32 a_mid;
+       u32 a_lo;
+};
+
+struct isa_address {
+       u32 a_hi;
+       u32 a_lo;
+};
+
+struct isa_range {
+       struct isa_address isa_addr;
+       struct pci_address pci_addr;
+       unsigned int size;
+};
+
+struct reg_property {
+       unsigned long address;
+       unsigned long size;
+};
+
+struct reg_property32 {
+       unsigned int address;
+       unsigned int size;
+};
+
+struct reg_property64 {
+       unsigned long address;
+       unsigned long size;
+};
+
+struct property {
+       char    *name;
+       int     length;
+       unsigned char *value;
+       struct property *next;
+};
+
+struct device_node {
+       char    *name;
+       char    *type;
+       phandle node;
+       phandle linux_phandle;
+       int     n_addrs;
+       struct  address_range *addrs;
+       int     n_intrs;
+       struct  interrupt_info *intrs;
+       char    *full_name;
+
+       struct  property *properties;
+       struct  device_node *parent;
+       struct  device_node *child;
+       struct  device_node *sibling;
+       struct  device_node *next;      /* next device of same type */
+       struct  device_node *allnext;   /* next in list of all nodes */
+       struct  proc_dir_entry *pde;    /* this node's proc directory */
+       struct  kref kref;
+       unsigned long _flags;
+       void    *data;
+};
+
+extern struct device_node *of_chosen;
+
+/* flag descriptions */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+
+#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
+#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
+{
+       dn->pde = de;
+}
+
+
+/* OBSOLETE: Old style node lookup */
+extern struct device_node *find_devices(const char *name);
+extern struct device_node *find_type_devices(const char *type);
+extern struct device_node *find_path_device(const char *path);
+extern struct device_node *find_compatible_devices(const char *type,
+                                                  const char *compat);
+extern struct device_node *find_all_nodes(void);
+
+/* New style node lookup */
+extern struct device_node *of_find_node_by_name(struct device_node *from,
+       const char *name);
+extern struct device_node *of_find_node_by_type(struct device_node *from,
+       const char *type);
+extern struct device_node *of_find_compatible_node(struct device_node *from,
+       const char *type, const char *compat);
+extern struct device_node *of_find_node_by_path(const char *path);
+extern struct device_node *of_find_node_by_phandle(phandle handle);
+extern struct device_node *of_find_all_nodes(struct device_node *prev);
+extern struct device_node *of_get_parent(const struct device_node *node);
+extern struct device_node *of_get_next_child(const struct device_node *node,
+                                            struct device_node *prev);
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+
+/* For updating the device tree at runtime */
+extern void of_attach_node(struct device_node *);
+extern void of_detach_node(const struct device_node *);
+
+/* Other Prototypes */
+extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
+extern int device_is_compatible(struct device_node *device, const char *);
+extern int machine_is_compatible(const char *compat);
+extern unsigned char *get_property(struct device_node *node, const char *name,
+                                  int *lenp);
+extern void print_properties(struct device_node *node);
+extern int prom_n_addr_cells(struct device_node* np);
+extern int prom_n_size_cells(struct device_node* np);
+extern int prom_n_intr_cells(struct device_node* np);
+extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
+extern void prom_add_property(struct device_node* np, struct property* prop);
+
+#ifdef CONFIG_PPC32
+/*
+ * PCI <-> OF matching functions
+ * (XXX should these be here?)
+ */
+struct pci_bus;
+struct pci_dev;
+extern int pci_device_from_OF_node(struct device_node *node,
+                                  u8* bus, u8* devfn);
+extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
+extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
+extern void pci_create_OF_bus_map(void);
+#endif
+
+extern struct resource *request_OF_resource(struct device_node* node,
+                               int index, const char* name_postfix);
+extern int release_OF_resource(struct device_node* node, int index);
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PROM_H */
similarity index 68%
rename from include/asm-ppc/reg.h
rename to include/asm-powerpc/reg.h
index 73c33e3..06a1f0f 100644 (file)
@@ -6,53 +6,99 @@
  * Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
  */
 
+#ifndef _ASM_POWERPC_REG_H
+#define _ASM_POWERPC_REG_H
 #ifdef __KERNEL__
-#ifndef __ASM_PPC_REGS_H__
-#define __ASM_PPC_REGS_H__
 
 #include <linux/stringify.h>
+#include <asm/cputable.h>
 
 /* Pickup Book E specific registers. */
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 #include <asm/reg_booke.h>
 #endif
 
-/* Machine State Register (MSR) Fields */
-#define MSR_SF         (1<<63)
-#define MSR_ISF                (1<<61)
-#define MSR_VEC                (1<<25)         /* Enable AltiVec */
-#define MSR_POW                (1<<18)         /* Enable Power Management */
-#define MSR_WE         (1<<18)         /* Wait State Enable */
-#define MSR_TGPR       (1<<17)         /* TLB Update registers in use */
-#define MSR_CE         (1<<17)         /* Critical Interrupt Enable */
-#define MSR_ILE                (1<<16)         /* Interrupt Little Endian */
-#define MSR_EE         (1<<15)         /* External Interrupt Enable */
-#define MSR_PR         (1<<14)         /* Problem State / Privilege Level */
-#define MSR_FP         (1<<13)         /* Floating Point enable */
-#define MSR_ME         (1<<12)         /* Machine Check Enable */
-#define MSR_FE0                (1<<11)         /* Floating Exception mode 0 */
-#define MSR_SE         (1<<10)         /* Single Step */
-#define MSR_BE         (1<<9)          /* Branch Trace */
-#define MSR_DE         (1<<9)          /* Debug Exception Enable */
-#define MSR_FE1                (1<<8)          /* Floating Exception mode 1 */
-#define MSR_IP         (1<<6)          /* Exception prefix 0x000/0xFFF */
-#define MSR_IR         (1<<5)          /* Instruction Relocate */
-#define MSR_DR         (1<<4)          /* Data Relocate */
-#define MSR_PE         (1<<3)          /* Protection Enable */
-#define MSR_PX         (1<<2)          /* Protection Exclusive Mode */
-#define MSR_RI         (1<<1)          /* Recoverable Exception */
-#define MSR_LE         (1<<0)          /* Little Endian */
+#define MSR_SF_LG      63              /* Enable 64 bit mode */
+#define MSR_ISF_LG     61              /* Interrupt 64b mode valid on 630 */
+#define MSR_HV_LG      60              /* Hypervisor state */
+#define MSR_VEC_LG     25              /* Enable AltiVec */
+#define MSR_POW_LG     18              /* Enable Power Management */
+#define MSR_WE_LG      18              /* Wait State Enable */
+#define MSR_TGPR_LG    17              /* TLB Update registers in use */
+#define MSR_CE_LG      17              /* Critical Interrupt Enable */
+#define MSR_ILE_LG     16              /* Interrupt Little Endian */
+#define MSR_EE_LG      15              /* External Interrupt Enable */
+#define MSR_PR_LG      14              /* Problem State / Privilege Level */
+#define MSR_FP_LG      13              /* Floating Point enable */
+#define MSR_ME_LG      12              /* Machine Check Enable */
+#define MSR_FE0_LG     11              /* Floating Exception mode 0 */
+#define MSR_SE_LG      10              /* Single Step */
+#define MSR_BE_LG      9               /* Branch Trace */
+#define MSR_DE_LG      9               /* Debug Exception Enable */
+#define MSR_FE1_LG     8               /* Floating Exception mode 1 */
+#define MSR_IP_LG      6               /* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG      5               /* Instruction Relocate */
+#define MSR_DR_LG      4               /* Data Relocate */
+#define MSR_PE_LG      3               /* Protection Enable */
+#define MSR_PX_LG      2               /* Protection Exclusive Mode */
+#define MSR_PMM_LG     2               /* Performance monitor */
+#define MSR_RI_LG      1               /* Recoverable Exception */
+#define MSR_LE_LG      0               /* Little Endian */
 
+#ifdef __ASSEMBLY__
+#define __MASK(X)      (1<<(X))
+#else
+#define __MASK(X)      (1UL<<(X))
+#endif
+
+#define MSR_SF         __MASK(MSR_SF_LG)       /* Enable 64 bit mode */
+#define MSR_ISF                __MASK(MSR_ISF_LG)      /* Interrupt 64b mode valid on 630 */
+#define MSR_HV                 __MASK(MSR_HV_LG)       /* Hypervisor state */
+#define MSR_VEC                __MASK(MSR_VEC_LG)      /* Enable AltiVec */
+#define MSR_POW                __MASK(MSR_POW_LG)      /* Enable Power Management */
+#define MSR_WE         __MASK(MSR_WE_LG)       /* Wait State Enable */
+#define MSR_TGPR       __MASK(MSR_TGPR_LG)     /* TLB Update registers in use */
+#define MSR_CE         __MASK(MSR_CE_LG)       /* Critical Interrupt Enable */
+#define MSR_ILE                __MASK(MSR_ILE_LG)      /* Interrupt Little Endian */
+#define MSR_EE         __MASK(MSR_EE_LG)       /* External Interrupt Enable */
+#define MSR_PR         __MASK(MSR_PR_LG)       /* Problem State / Privilege Level */
+#define MSR_FP         __MASK(MSR_FP_LG)       /* Floating Point enable */
+#define MSR_ME         __MASK(MSR_ME_LG)       /* Machine Check Enable */
+#define MSR_FE0                __MASK(MSR_FE0_LG)      /* Floating Exception mode 0 */
+#define MSR_SE         __MASK(MSR_SE_LG)       /* Single Step */
+#define MSR_BE         __MASK(MSR_BE_LG)       /* Branch Trace */
+#define MSR_DE         __MASK(MSR_DE_LG)       /* Debug Exception Enable */
+#define MSR_FE1                __MASK(MSR_FE1_LG)      /* Floating Exception mode 1 */
+#define MSR_IP         __MASK(MSR_IP_LG)       /* Exception prefix 0x000/0xFFF */
+#define MSR_IR         __MASK(MSR_IR_LG)       /* Instruction Relocate */
+#define MSR_DR         __MASK(MSR_DR_LG)       /* Data Relocate */
+#define MSR_PE         __MASK(MSR_PE_LG)       /* Protection Enable */
+#define MSR_PX         __MASK(MSR_PX_LG)       /* Protection Exclusive Mode */
+#ifndef MSR_PMM
+#define MSR_PMM                __MASK(MSR_PMM_LG)      /* Performance monitor */
+#endif
+#define MSR_RI         __MASK(MSR_RI_LG)       /* Recoverable Exception */
+#define MSR_LE         __MASK(MSR_LE_LG)       /* Little Endian */
+
+#ifdef CONFIG_PPC64
+#define MSR_           MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
+#define MSR_KERNEL      MSR_ | MSR_SF | MSR_HV
+
+#define MSR_USER32     MSR_ | MSR_PR | MSR_EE
+#define MSR_USER64     MSR_USER32 | MSR_SF
+
+#else /* 32-bit */
 /* Default MSR for kernel mode. */
+#ifndef MSR_KERNEL     /* reg_booke.h also defines this */
 #ifdef CONFIG_APUS_FAST_EXCEPT
 #define MSR_KERNEL     (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR)
-#endif
-
-#ifndef MSR_KERNEL
+#else
 #define MSR_KERNEL     (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
 #endif
+#endif
 
 #define MSR_USER       (MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
 
 /* Floating Point Status and Control Register (FPSCR) Fields */
 #define FPSCR_FX       0x80000000      /* FPU exception summary */
 #define FPSCR_VX       0x20000000      /* Invalid operation summary */
 #define FPSCR_OX       0x10000000      /* Overflow exception summary */
 #define FPSCR_UX       0x08000000      /* Underflow exception summary */
-#define FPSCR_ZX       0x04000000      /* Zero-devide exception summary */
+#define FPSCR_ZX       0x04000000      /* Zero-divide exception summary */
 #define FPSCR_XX       0x02000000      /* Inexact exception summary */
 #define FPSCR_VXSNAN   0x01000000      /* Invalid op for SNaN */
 #define FPSCR_VXISI    0x00800000      /* Invalid op for Inv - Inv */
 
 /* Special Purpose Registers (SPRNs)*/
 #define SPRN_CTR       0x009   /* Count Register */
+#define SPRN_CTRLF     0x088
+#define SPRN_CTRLT     0x098
+#define   CTRL_RUNLATCH        0x1
 #define SPRN_DABR      0x3F5   /* Data Address Breakpoint Register */
+#define   DABR_TRANSLATION     (1UL << 2)
 #define SPRN_DAR       0x013   /* Data Address Register */
+#define        SPRN_DSISR      0x012   /* Data Storage Interrupt Status Register */
+#define   DSISR_NOHPTE         0x40000000      /* no translation found */
+#define   DSISR_PROTFAULT      0x08000000      /* protection fault */
+#define   DSISR_ISSTORE                0x02000000      /* access was a store */
+#define   DSISR_DABRMATCH      0x00400000      /* hit data breakpoint */
+#define   DSISR_NOSEGMENT      0x00200000      /* STAB/SLB miss */
 #define SPRN_TBRL      0x10C   /* Time Base Read Lower Register (user, R/O) */
 #define SPRN_TBRU      0x10D   /* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL      0x11C   /* Time Base Lower Register (super, R/W) */
 #define DER_EBRKE      0x00000002      /* External Breakpoint Interrupt */
 #define DER_DPIE       0x00000001      /* Dev. Port Nonmaskable Request */
 #define SPRN_DMISS     0x3D0           /* Data TLB Miss Register */
-#define SPRN_DSISR     0x012   /* Data Storage Interrupt Status Register */
 #define SPRN_EAR       0x11A           /* External Address Register */
 #define SPRN_HASH1     0x3D2           /* Primary Hash Address Register */
 #define SPRN_HASH2     0x3D3           /* Secondary Hash Address Resgister */
 #define SPRN_IABR      0x3F2   /* Instruction Address Breakpoint Register */
 #define SPRN_HID4      0x3F4           /* 970 HID4 */
 #define SPRN_HID5      0x3F6           /* 970 HID5 */
+#define        SPRN_HID6       0x3F9   /* BE HID 6 */
+#define          HID6_LB       (0x0F<<12) /* Concurrent Large Page Modes */
+#define          HID6_DLP      (1<<20) /* Disable all large page modes (4K only) */
+#define        SPRN_TSCR       0x399   /* Thread switch control on BE */
+#define        SPRN_TTR        0x39A   /* Thread switch timeout on BE */
+#define          TSCR_DEC_ENABLE       0x200000 /* Decrementer Interrupt */
+#define          TSCR_EE_ENABLE        0x100000 /* External Interrupt */
+#define          TSCR_EE_BOOST         0x080000 /* External Interrupt Boost */
+#define        SPRN_TSC        0x3FD   /* Thread switch control on others */
+#define        SPRN_TST        0x3FC   /* Thread switch timeout on others */
 #if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
 #define SPRN_IAC1      0x3F4           /* Instruction Address Compare 1 */
 #define SPRN_IAC2      0x3F5           /* Instruction Address Compare 2 */
 #define L3CR_L3DO              0x00000040      /* L3 data only mode */
 #define L3CR_PMEN              0x00000004      /* L3 private memory enable */
 #define L3CR_PMSIZ             0x00000001      /* L3 private memory size */
+
 #define SPRN_MSSCR0    0x3f6   /* Memory Subsystem Control Register 0 */
 #define SPRN_MSSSR0    0x3f7   /* Memory Subsystem Status Register 1 */
 #define SPRN_LDSTCR    0x3f8   /* Load/Store control register */
 #define SPRN_LDSTDB    0x3f4   /* */
 #define SPRN_LR                0x008   /* Link Register */
-#define SPRN_MMCR0     0x3B8   /* Monitor Mode Control Register 0 */
-#define SPRN_MMCR1     0x3BC   /* Monitor Mode Control Register 1 */
 #ifndef SPRN_PIR
 #define SPRN_PIR       0x3FF   /* Processor Identification Register */
 #endif
-#define SPRN_PMC1      0x3B9   /* Performance Counter Register 1 */
-#define SPRN_PMC2      0x3BA   /* Performance Counter Register 2 */
-#define SPRN_PMC3      0x3BD   /* Performance Counter Register 3 */
-#define SPRN_PMC4      0x3BE   /* Performance Counter Register 4 */
 #define SPRN_PTEHI     0x3D5   /* 981 7450 PTE HI word (S/W TLB load) */
 #define SPRN_PTELO     0x3D6   /* 982 7450 PTE LO word (S/W TLB load) */
+#define        SPRN_PURR       0x135   /* Processor Utilization of Resources Reg */
 #define SPRN_PVR       0x11F   /* Processor Version Register */
 #define SPRN_RPA       0x3D6   /* Required Physical Address Register */
 #define SPRN_SDA       0x3BF   /* Sampled Data Address Register */
 #define SPRN_VRSAVE    0x100   /* Vector Register Save Register */
 #define SPRN_XER       0x001   /* Fixed Point Exception Register */
 
+/* Performance monitor SPRs */
+#ifdef CONFIG_PPC64
+#define SPRN_MMCR0     795
+#define   MMCR0_FC     0x80000000UL /* freeze counters */
+#define   MMCR0_FCS    0x40000000UL /* freeze in supervisor state */
+#define   MMCR0_KERNEL_DISABLE MMCR0_FCS
+#define   MMCR0_FCP    0x20000000UL /* freeze in problem state */
+#define   MMCR0_PROBLEM_DISABLE MMCR0_FCP
+#define   MMCR0_FCM1   0x10000000UL /* freeze counters while MSR mark = 1 */
+#define   MMCR0_FCM0   0x08000000UL /* freeze counters while MSR mark = 0 */
+#define   MMCR0_PMXE   0x04000000UL /* performance monitor exception enable */
+#define   MMCR0_FCECE  0x02000000UL /* freeze ctrs on enabled cond or event */
+#define   MMCR0_TBEE   0x00400000UL /* time base exception enable */
+#define   MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
+#define   MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
+#define   MMCR0_TRIGGER        0x00002000UL /* TRIGGER enable */
+#define   MMCR0_PMAO   0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define   MMCR0_SHRFC  0x00000040UL /* SHRre freeze conditions between threads */
+#define   MMCR0_FCTI   0x00000008UL /* freeze counters in tags inactive mode */
+#define   MMCR0_FCTA   0x00000004UL /* freeze counters in tags active mode */
+#define   MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+#define   MMCR0_FCHV   0x00000001UL /* freeze conditions in hypervisor mode */
+#define SPRN_MMCR1     798
+#define SPRN_MMCRA     0x312
+#define   MMCRA_SIHV   0x10000000UL /* state of MSR HV when SIAR set */
+#define   MMCRA_SIPR   0x08000000UL /* state of MSR PR when SIAR set */
+#define   MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define SPRN_PMC1      787
+#define SPRN_PMC2      788
+#define SPRN_PMC3      789
+#define SPRN_PMC4      790
+#define SPRN_PMC5      791
+#define SPRN_PMC6      792
+#define SPRN_PMC7      793
+#define SPRN_PMC8      794
+#define SPRN_SIAR      780
+#define SPRN_SDAR      781
+
+#else /* 32-bit */
+#define SPRN_MMCR0     0x3B8   /* Monitor Mode Control Register 0 */
+#define SPRN_MMCR1     0x3BC   /* Monitor Mode Control Register 1 */
+#define SPRN_PMC1      0x3B9   /* Performance Counter Register 1 */
+#define SPRN_PMC2      0x3BA   /* Performance Counter Register 2 */
+#define SPRN_PMC3      0x3BD   /* Performance Counter Register 3 */
+#define SPRN_PMC4      0x3BE   /* Performance Counter Register 4 */
+
 /* Bit definitions for MMCR0 and PMC1 / PMC2. */
 #define MMCR0_PMC1_CYCLES      (1 << 7)
 #define MMCR0_PMC1_ICACHEMISS  (5 << 7)
 #define MMCR0_PMC2_ITLB                0x7
 #define MMCR0_PMC2_LOADMISSTIME        0x5
 #define MMCR0_PMXE     (1 << 26)
-
-/* Processor Version Register */
+#endif
 
 /* Processor Version Register (PVR) field extraction */
 
 #define PVR_VER(pvr)   (((pvr) >>  16) & 0xFFFF)       /* Version field */
 #define PVR_REV(pvr)   (((pvr) >>   0) & 0xFFFF)       /* Revison field */
 
+#define __is_processor(pv)     (PVR_VER(mfspr(SPRN_PVR)) == (pv))
+
 /*
  * IBM has further subdivided the standard PowerPC 16-bit version and
  * revision subfields of the PVR for the PowerPC 403s into the following:
 #define PVR_8245       0x80811014
 #define PVR_8260       PVR_8240
 
-#if 0
-/* Segment Registers */
-#define SR0    0
-#define SR1    1
-#define SR2    2
-#define SR3    3
-#define SR4    4
-#define SR5    5
-#define SR6    6
-#define SR7    7
-#define SR8    8
-#define SR9    9
-#define SR10   10
-#define SR11   11
-#define SR12   12
-#define SR13   13
-#define SR14   14
-#define SR15   15
-#endif
+/* 64-bit processors */
+/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
+#define        PV_NORTHSTAR    0x0033
+#define        PV_PULSAR       0x0034
+#define        PV_POWER4       0x0035
+#define        PV_ICESTAR      0x0036
+#define        PV_SSTAR        0x0037
+#define        PV_POWER4p      0x0038
+#define PV_970         0x0039
+#define        PV_POWER5       0x003A
+#define PV_POWER5p     0x003B
+#define PV_970FX       0x003C
+#define        PV_630          0x0040
+#define        PV_630p 0x0041
+#define        PV_970MP        0x0044
+#define        PV_BE           0x0070
+
+/*
+ * Number of entries in the SLB. If this ever changes we should handle
+ * it with a use a cpu feature fixup.
+ */
+#define SLB_NUM_ENTRIES 64
 
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
-#define mfmsr()                ({unsigned int rval; \
+#define mfmsr()                ({unsigned long rval; \
                        asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+#ifdef CONFIG_PPC64
+#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+                                    : : "r" (v))
+#define mtmsrd(v)      __mtmsrd((v), 0)
+#else
 #define mtmsr(v)       asm volatile("mtmsr %0" : : "r" (v))
+#endif
 
-#define mfspr(rn)      ({unsigned int rval; \
+#define mfspr(rn)      ({unsigned long rval; \
                        asm volatile("mfspr %0," __stringify(rn) \
                                : "=r" (rval)); rval;})
 #define mtspr(rn, v)   asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
 
+#define mftb()         ({unsigned long rval;   \
+                       asm volatile("mftb %0" : "=r" (rval)); rval;})
+#define mftbl()                ({unsigned long rval;   \
+                       asm volatile("mftbl %0" : "=r" (rval)); rval;})
+
+#define mttbl(v)       asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v)       asm volatile("mttbu %0":: "r"(v))
+
+#ifdef CONFIG_PPC32
 #define mfsrin(v)      ({unsigned int rval; \
                        asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
                                        rval;})
+#endif
 
 #define proc_trap()    asm volatile("trap")
+
+#ifdef CONFIG_PPC64
+static inline void ppc64_runlatch_on(void)
+{
+       unsigned long ctrl;
+
+       if (cpu_has_feature(CPU_FTR_CTRL)) {
+               ctrl = mfspr(SPRN_CTRLF);
+               ctrl |= CTRL_RUNLATCH;
+               mtspr(SPRN_CTRLT, ctrl);
+       }
+}
+
+static inline void ppc64_runlatch_off(void)
+{
+       unsigned long ctrl;
+
+       if (cpu_has_feature(CPU_FTR_CTRL)) {
+               ctrl = mfspr(SPRN_CTRLF);
+               ctrl &= ~CTRL_RUNLATCH;
+               mtspr(SPRN_CTRLT, ctrl);
+       }
+}
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+       mttbl(0);
+       mttbu(upper);
+       mttbl(lower);
+}
+#endif
+
+#define __get_SP()     ({unsigned long sp; \
+                       asm volatile("mr %0,1": "=r" (sp)); sp;})
+
+#else /* __ASSEMBLY__ */
+
+#define RUNLATCH_ON(REG)                       \
+BEGIN_FTR_SECTION                              \
+       mfspr   (REG),SPRN_CTRLF;               \
+       ori     (REG),(REG),CTRL_RUNLATCH;      \
+       mtspr   SPRN_CTRLT,(REG);               \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
 #endif /* __ASSEMBLY__ */
-#endif /* __ASM_PPC_REGS_H__ */
 #endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_H */
similarity index 98%
rename from include/asm-ppc64/rtas.h
rename to include/asm-powerpc/rtas.h
index e7d1b52..2c05033 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC64_RTAS_H
-#define _PPC64_RTAS_H
+#ifndef _POWERPC_RTAS_H
+#define _POWERPC_RTAS_H
 
 #include <linux/spinlock.h>
 #include <asm/page.h>
@@ -190,7 +190,7 @@ extern void rtas_progress(char *s, unsigned short hex);
 extern void rtas_initialize(void);
 
 struct rtc_time;
-extern void rtas_get_boot_time(struct rtc_time *rtc_time);
+extern unsigned long rtas_get_boot_time(void);
 extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
 extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
 
@@ -246,4 +246,4 @@ extern unsigned long rtas_rmo_buf;
 
 #define GLOBAL_INTERRUPT_QUEUE 9005
 
-#endif /* _PPC64_RTAS_H */
+#endif /* _POWERPC_RTAS_H */
diff --git a/include/asm-powerpc/rtc.h b/include/asm-powerpc/rtc.h
new file mode 100644 (file)
index 0000000..f580292
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Real-time clock definitions and interfaces
+ *
+ * Author: Tom Rini <trini@mvista.com>
+ *
+ * 2002 (c) MontaVista, Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Based on:
+ * include/asm-m68k/rtc.h
+ *
+ * Copyright Richard Zidlicky
+ * implementation details for genrtc/q40rtc driver
+ *
+ * And the old drivers/macintosh/rtc.c which was heavily based on:
+ * Linux/SPARC Real Time Clock Driver
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ *
+ * With additional work by Paul Mackerras and Franz Sirl.
+ */
+
+#ifndef __ASM_POWERPC_RTC_H__
+#define __ASM_POWERPC_RTC_H__
+
+#ifdef __KERNEL__
+
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+
+#define RTC_PIE 0x40           /* periodic interrupt enable */
+#define RTC_AIE 0x20           /* alarm interrupt enable */
+#define RTC_UIE 0x10           /* update-finished interrupt enable */
+
+/* some dummy definitions */
+#define RTC_BATT_BAD 0x100     /* battery bad */
+#define RTC_SQWE 0x08          /* enable square-wave output */
+#define RTC_DM_BINARY 0x04     /* all time/date values are BCD if clear */
+#define RTC_24H 0x02           /* 24 hour mode - else hours bit 7 means pm */
+#define RTC_DST_EN 0x01                /* auto switch DST - works f. USA only */
+
+static inline unsigned int get_rtc_time(struct rtc_time *time)
+{
+       if (ppc_md.get_rtc_time)
+               ppc_md.get_rtc_time(time);
+       return RTC_24H;
+}
+
+/* Set the current date and time in the real time clock. */
+static inline int set_rtc_time(struct rtc_time *time)
+{
+       if (ppc_md.set_rtc_time)
+               return ppc_md.set_rtc_time(time);
+       return -EINVAL;
+}
+
+static inline unsigned int get_rtc_ss(void)
+{
+       struct rtc_time h;
+
+       get_rtc_time(&h);
+       return h.tm_sec;
+}
+
+static inline int get_rtc_pll(struct rtc_pll_info *pll)
+{
+       return -EINVAL;
+}
+static inline int set_rtc_pll(struct rtc_pll_info *pll)
+{
+       return -EINVAL;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_RTC_H__ */
similarity index 91%
rename from include/asm-ppc64/rwsem.h
rename to include/asm-powerpc/rwsem.h
index bd5c2f0..0a5b83a 100644 (file)
@@ -1,18 +1,14 @@
+#ifndef _ASM_POWERPC_RWSEM_H
+#define _ASM_POWERPC_RWSEM_H
+
+#ifdef __KERNEL__
+
 /*
  * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
  * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _PPC64_RWSEM_H
-#define _PPC64_RWSEM_H
-
-#ifdef __KERNEL__
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
@@ -163,5 +159,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-#endif /* __KERNEL__ */
-#endif /* _PPC_RWSEM_XADD_H */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_RWSEM_H */
similarity index 66%
rename from include/asm-ppc64/seccomp.h
rename to include/asm-powerpc/seccomp.h
index c130c33..1e1cfe1 100644 (file)
@@ -1,11 +1,6 @@
-#ifndef _ASM_SECCOMP_H
-
-#include <linux/thread_info.h> /* already defines TIF_32BIT */
-
-#ifndef TIF_32BIT
-#error "unexpected TIF_32BIT on ppc64"
-#endif
+#ifndef _ASM_POWERPC_SECCOMP_H
 
+#include <linux/thread_info.h>
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
@@ -18,4 +13,4 @@
 #define __NR_seccomp_exit_32 __NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
-#endif /* _ASM_SECCOMP_H */
+#endif /* _ASM_POWERPC_SECCOMP_H */
similarity index 51%
rename from include/asm-ppc64/sections.h
rename to include/asm-powerpc/sections.h
index 308ca6f..47be2ac 100644 (file)
@@ -1,22 +1,11 @@
-#ifndef _PPC64_SECTIONS_H
-#define _PPC64_SECTIONS_H
-
-extern char _end[];
+#ifndef _ASM_POWERPC_SECTIONS_H
+#define _ASM_POWERPC_SECTIONS_H
 
 #include <asm-generic/sections.h>
 
-#define __pmac
-#define __pmacdata
-
-#define __prep
-#define __prepdata
-
-#define __chrp
-#define __chrpdata
-
-#define __openfirmware
-#define __openfirmwaredata
+#ifdef __powerpc64__
 
+extern char _end[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
@@ -27,3 +16,5 @@ static inline int in_kernel_text(unsigned long addr)
 }
 
 #endif
+
+#endif /* _ASM_POWERPC_SECTIONS_H */
similarity index 95%
rename from include/asm-ppc64/semaphore.h
rename to include/asm-powerpc/semaphore.h
index aefe775..fd42fe9 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC64_SEMAPHORE_H
-#define _PPC64_SEMAPHORE_H
+#ifndef _ASM_POWERPC_SEMAPHORE_H
+#define _ASM_POWERPC_SEMAPHORE_H
 
 /*
  * Remove spinlock-based RW semaphores; RW semaphore definitions are
@@ -95,4 +95,4 @@ static inline void up(struct semaphore * sem)
 
 #endif /* __KERNEL__ */
 
-#endif /* !(_PPC64_SEMAPHORE_H) */
+#endif /* _ASM_POWERPC_SEMAPHORE_H */
similarity index 80%
rename from include/asm-ppc64/spinlock_types.h
rename to include/asm-powerpc/spinlock_types.h
index a37c8ea..74236c9 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __ASM_SPINLOCK_TYPES_H
-#define __ASM_SPINLOCK_TYPES_H
+#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SPINLOCK_TYPES_H
 
 #ifndef __LINUX_SPINLOCK_TYPES_H
 # error "please don't include this file directly"
similarity index 69%
rename from include/asm-ppc64/statfs.h
rename to include/asm-powerpc/statfs.h
index 3c985e5..6702402 100644 (file)
@@ -1,12 +1,11 @@
-#ifndef _PPC64_STATFS_H
-#define _PPC64_STATFS_H
+#ifndef _ASM_POWERPC_STATFS_H
+#define _ASM_POWERPC_STATFS_H
 
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
+/* For ppc32 we just use the generic definitions, not so simple on ppc64 */
+
+#ifndef __powerpc64__
+#include <asm-generic/statfs.h>
+#else
 
 #ifndef __KERNEL_STRICT_NAMES
 #include <linux/types.h>
@@ -57,5 +56,5 @@ struct compat_statfs64 {
        __u32 f_frsize;
        __u32 f_spare[5];
 };
-
-#endif  /* _PPC64_STATFS_H */
+#endif /* ! __powerpc64__ */
+#endif
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644 (file)
index 0000000..4660c03
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H 
+#define _ASM_POWERPC_SYNCH_H 
+
+#include <linux/config.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+#    define LWSYNC     lwsync
+#else
+#    define LWSYNC     sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP   "eieio\n"
+#define ISYNC_ON_SMP   "\n\tisync"
+#define SYNC_ON_SMP    __stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+       __asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+       __asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp() eieio()
+#define isync_on_smp() isync()
+#else
+#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
+#define isync_on_smp() __asm__ __volatile__("": : :"memory")
+#endif
+
+#endif /* _ASM_POWERPC_SYNCH_H */
+
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
new file mode 100644 (file)
index 0000000..d60c8c9
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SYSTEM_H
+#define _ASM_POWERPC_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/hw_irq.h>
+#include <asm/ppc_asm.h>
+#include <asm/atomic.h>
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ *     across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores.  Lwsync is fine for
+ * rmb(), though.  Note that lwsync is interpreted as sync by
+ * 32-bit and older 64-bit CPUs.
+ *
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight eieio barrier on
+ * SMP since it is only used to order updates to system memory.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends()  do { } while(0)
+
+#define set_mb(var, value)     do { var = value; mb(); } while (0)
+#define set_wmb(var, value)    do { var = value; wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()       mb()
+#define smp_rmb()      rmb()
+#define smp_wmb()      __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_read_barrier_depends()     read_barrier_depends()
+#else
+#define smp_mb()       barrier()
+#define smp_rmb()      barrier()
+#define smp_wmb()      barrier()
+#define smp_read_barrier_depends()     do { } while(0)
+#endif /* CONFIG_SMP */
+
+#ifdef __KERNEL__
+struct task_struct;
+struct pt_regs;
+
+#ifdef CONFIG_DEBUGGER
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+       if (unlikely(__ ## __NAME)) \
+               return __ ## __NAME(regs); \
+       return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#ifdef CONFIG_XMON
+extern void xmon_init(int enable);
+#endif
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+extern int set_dabr(unsigned long dabr);
+extern void print_backtrace(unsigned long *);
+extern void show_regs(struct pt_regs * regs);
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR()    0L
+#define _get_L3CR()    0L
+#define _set_L2CR(val) do { } while(0)
+#define _set_L3CR(val) do { } while(0)
+#endif
+
+extern void via_cuda_init(void);
+extern void read_rtc_time(void);
+extern void pmac_find_display(void);
+extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void enable_kernel_altivec(void);
+extern void giveup_altivec(struct task_struct *);
+extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
+extern void giveup_spe(struct task_struct *);
+extern void load_up_spe(struct task_struct *);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
+extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern int die(const char *, struct pt_regs *, long);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+#endif /* CONFIG_BOOKE_WDT */
+
+/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
+extern unsigned char e2a(unsigned char);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+extern struct task_struct *__switch_to(struct task_struct *,
+       struct task_struct *);
+#define switch_to(prev, next, last)    ((last) = __switch_to((prev), (next)))
+
+struct thread_struct;
+extern struct task_struct *_switch(struct thread_struct *prev,
+                                  struct thread_struct *next);
+
+extern unsigned int rtas_data;
+extern int mem_init_done;      /* set on boot once kmalloc can be called */
+
+extern int powersave_nap;      /* set if nap mode can be used in idle loop */
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __inline__ unsigned long
+__xchg_u32(volatile void *p, unsigned long val)
+{
+       unsigned long prev;
+
+       __asm__ __volatile__(
+       EIEIO_ON_SMP
+"1:    lwarx   %0,0,%2 \n"
+       PPC405_ERR77(0,%2)
+"      stwcx.  %3,0,%2 \n\
+       bne-    1b"
+       ISYNC_ON_SMP
+       : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
+       : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+       : "cc", "memory");
+
+       return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long
+__xchg_u64(volatile void *p, unsigned long val)
+{
+       unsigned long prev;
+
+       __asm__ __volatile__(
+       EIEIO_ON_SMP
+"1:    ldarx   %0,0,%2 \n"
+       PPC405_ERR77(0,%2)
+"      stdcx.  %3,0,%2 \n\
+       bne-    1b"
+       ISYNC_ON_SMP
+       : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
+       : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+       : "cc", "memory");
+
+       return prev;
+}
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+{
+       switch (size) {
+       case 4:
+               return __xchg_u32(ptr, x);
+#ifdef CONFIG_PPC64
+       case 8:
+               return __xchg_u64(ptr, x);
+#endif
+       }
+       __xchg_called_with_bad_pointer();
+       return x;
+}
+
+#define xchg(ptr,x)                                                         \
+  ({                                                                        \
+     __typeof__(*(ptr)) _x_ = (x);                                          \
+     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+  })
+
+#define tas(ptr) (xchg((ptr),1))
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+#define __HAVE_ARCH_CMPXCHG    1
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+       unsigned int prev;
+
+       __asm__ __volatile__ (
+       EIEIO_ON_SMP
+"1:    lwarx   %0,0,%2         # __cmpxchg_u32\n\
+       cmpw    0,%0,%3\n\
+       bne-    2f\n"
+       PPC405_ERR77(0,%2)
+"      stwcx.  %4,0,%2\n\
+       bne-    1b"
+       ISYNC_ON_SMP
+       "\n\
+2:"
+       : "=&r" (prev), "=m" (*p)
+       : "r" (p), "r" (old), "r" (new), "m" (*p)
+       : "cc", "memory");
+
+       return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+       unsigned long prev;
+
+       __asm__ __volatile__ (
+       EIEIO_ON_SMP
+"1:    ldarx   %0,0,%2         # __cmpxchg_u64\n\
+       cmpd    0,%0,%3\n\
+       bne-    2f\n\
+       stdcx.  %4,0,%2\n\
+       bne-    1b"
+       ISYNC_ON_SMP
+       "\n\
+2:"
+       : "=&r" (prev), "=m" (*p)
+       : "r" (p), "r" (old), "r" (new), "m" (*p)
+       : "cc", "memory");
+
+       return prev;
+}
+#endif
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+         unsigned int size)
+{
+       switch (size) {
+       case 4:
+               return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+       case 8:
+               return __cmpxchg_u64(ptr, old, new);
+#endif
+       }
+       __cmpxchg_called_with_bad_pointer();
+       return old;
+}
+
+#define cmpxchg(ptr,o,n)                                                \
+  ({                                                                    \
+     __typeof__(*(ptr)) _o_ = (o);                                      \
+     __typeof__(*(ptr)) _n_ = (n);                                      \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,          \
+                                   (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand 
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN   0
+#endif
+
+#define arch_align_stack(x) (x)
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x)    ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SYSTEM_H */
similarity index 83%
rename from include/asm-ppc64/thread_info.h
rename to include/asm-powerpc/thread_info.h
index 0494df6..0b4c245 100644 (file)
@@ -1,15 +1,25 @@
-/* thread_info.h: PPC low-level thread information
+/* thread_info.h: PowerPC low-level thread information
  * adapted from the i386 version by Paul Mackerras
  *
  * Copyright (C) 2002  David Howells (dhowells@redhat.com)
  * - Incorporating suggestions made by Linus Torvalds and Dave Miller
  */
 
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
+#ifndef _ASM_POWERPC_THREAD_INFO_H
+#define _ASM_POWERPC_THREAD_INFO_H
 
 #ifdef __KERNEL__
 
+/* We have 8k stacks on ppc32 and 16k on ppc64 */
+
+#ifdef CONFIG_PPC64
+#define THREAD_SHIFT           14
+#else
+#define THREAD_SHIFT           13
+#endif
+
+#define THREAD_SIZE            (1 << THREAD_SHIFT)
+
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/cache.h>
@@ -24,7 +34,8 @@ struct thread_info {
        struct task_struct *task;               /* main task structure */
        struct exec_domain *exec_domain;        /* execution domain */
        int             cpu;                    /* cpu we're on */
-       int             preempt_count;          /* 0 => preemptable, <0 => BUG */
+       int             preempt_count;          /* 0 => preemptable,
+                                                  <0 => BUG */
        struct restart_block restart_block;
        /* set by force_successful_syscall_return */
        unsigned char   syscall_noerror;
@@ -54,9 +65,6 @@ struct thread_info {
 
 /* thread information allocation */
 
-#define THREAD_ORDER           2
-#define THREAD_SIZE            (PAGE_SIZE << THREAD_ORDER)
-#define THREAD_SHIFT           (PAGE_SHIFT + THREAD_ORDER)
 #ifdef CONFIG_DEBUG_STACK_USAGE
 #define alloc_thread_info(tsk)                                 \
        ({                                                      \
@@ -68,7 +76,7 @@ struct thread_info {
                ret;                                            \
        })
 #else
-#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
+#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
 #endif
 #define free_thread_info(ti)   kfree(ti)
 #define get_thread_info(ti)    get_task_struct((ti)->task)
@@ -77,9 +85,11 @@ struct thread_info {
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
-       struct thread_info *ti;
-       __asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT));
-       return ti;
+       register unsigned long sp asm("r1");
+
+       /* gcc4, at least, is smart enough to turn this into a single
+        * rlwinm for ppc32 and clrrdi for ppc64 */
+       return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
 }
 
 #endif /* __ASSEMBLY__ */
@@ -122,4 +132,4 @@ static inline struct thread_info *current_thread_info(void)
 
 #endif /* __KERNEL__ */
 
-#endif /* _ASM_THREAD_INFO_H */
+#endif /* _ASM_POWERPC_THREAD_INFO_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
new file mode 100644 (file)
index 0000000..99bfe32
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Common time prototypes and such for all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_TIME_H
+#define __POWERPC_TIME_H
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/iSeries/HvCall.h>
+#endif
+
+/* time.c */
+extern unsigned long tb_ticks_per_jiffy;
+extern unsigned long tb_ticks_per_usec;
+extern unsigned long tb_ticks_per_sec;
+extern u64 tb_to_xs;
+extern unsigned      tb_to_us;
+extern u64 tb_last_stamp;
+
+DECLARE_PER_CPU(unsigned long, last_jiffy);
+
+struct rtc_time;
+extern void to_tm(int tim, struct rtc_time * tm);
+extern time_t last_rtc_update;
+
+extern void generic_calibrate_decr(void);
+extern void wakeup_decrementer(void);
+
+/* Some sane defaults: 125 MHz timebase, 1GHz processor */
+extern unsigned long ppc_proc_freq;
+#define DEFAULT_PROC_FREQ      (DEFAULT_TB_FREQ * 8)
+extern unsigned long ppc_tb_freq;
+#define DEFAULT_TB_FREQ                125000000UL
+
+/*
+ * By putting all of this stuff into a single struct we 
+ * reduce the number of cache lines touched by do_gettimeofday.
+ * Both by collecting all of the data in one cache line and
+ * by touching only one TOC entry on ppc64.
+ */
+struct gettimeofday_vars {
+       u64 tb_to_xs;
+       u64 stamp_xsec;
+       u64 tb_orig_stamp;
+};
+
+struct gettimeofday_struct {
+       unsigned long tb_ticks_per_sec;
+       struct gettimeofday_vars vars[2];
+       struct gettimeofday_vars * volatile varp;
+       unsigned      var_idx;
+       unsigned      tb_to_us;
+};
+
+struct div_result {
+       u64 result_high;
+       u64 result_low;
+};
+
+/* Accessor functions for the timebase (RTC on 601) registers. */
+/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
+#ifdef CONFIG_6xx
+#define __USE_RTC()    (!cpu_has_feature(CPU_FTR_USE_TB))
+#else
+#define __USE_RTC()    0
+#endif
+
+/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
+static inline unsigned long get_tbl(void)
+{
+       unsigned long tbl;
+
+#if defined(CONFIG_403GCX)
+       asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
+#else
+       asm volatile("mftb %0" : "=r" (tbl));
+#endif
+       return tbl;
+}
+
+static inline unsigned int get_tbu(void)
+{
+       unsigned int tbu;
+
+#if defined(CONFIG_403GCX)
+       asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
+#else
+       asm volatile("mftbu %0" : "=r" (tbu));
+#endif
+       return tbu;
+}
+
+static inline unsigned int get_rtcl(void)
+{
+       unsigned int rtcl;
+
+       asm volatile("mfrtcl %0" : "=r" (rtcl));
+       return rtcl;
+}
+
+#ifdef CONFIG_PPC64
+static inline u64 get_tb(void)
+{
+       return mftb();
+}
+#else
+static inline u64 get_tb(void)
+{
+       unsigned int tbhi, tblo, tbhi2;
+
+       do {
+               tbhi = get_tbu();
+               tblo = get_tbl();
+               tbhi2 = get_tbu();
+       } while (tbhi != tbhi2);
+
+       return ((u64)tbhi << 32) | tblo;
+}
+#endif
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+       mtspr(SPRN_TBWL, 0);
+       mtspr(SPRN_TBWU, upper);
+       mtspr(SPRN_TBWL, lower);
+}
+
+/* Accessor functions for the decrementer register.
+ * The 4xx doesn't even have a decrementer.  I tried to use the
+ * generic timer interrupt code, which seems OK, with the 4xx PIT
+ * in auto-reload mode.  The problem is PIT stops counting when it
+ * hits zero.  If it would wrap, we could use it just like a decrementer.
+ */
+static inline unsigned int get_dec(void)
+{
+#if defined(CONFIG_40x)
+       return (mfspr(SPRN_PIT));
+#else
+       return (mfspr(SPRN_DEC));
+#endif
+}
+
+static inline void set_dec(int val)
+{
+#if defined(CONFIG_40x)
+       return;         /* Have to let it auto-reload */
+#elif defined(CONFIG_8xx_CPU6)
+       set_dec_cpu6(val);
+#else
+#ifdef CONFIG_PPC_ISERIES
+       struct paca_struct *lpaca = get_paca();
+       int cur_dec;
+
+       if (lpaca->lppaca.shared_proc) {
+               lpaca->lppaca.virtual_decr = val;
+               cur_dec = get_dec();
+               if (cur_dec > val)
+                       HvCall_setVirtualDecr();
+       } else
+#endif
+               mtspr(SPRN_DEC, val);
+#endif /* not 40x or 8xx_CPU6 */
+}
+
+static inline unsigned long tb_ticks_since(unsigned long tstamp)
+{
+       if (__USE_RTC()) {
+               int delta = get_rtcl() - (unsigned int) tstamp;
+               return delta < 0 ? delta + 1000000000 : delta;
+       }
+       return get_tbl() - tstamp;
+}
+
+#define mulhwu(x,y) \
+({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+
+#ifdef CONFIG_PPC64
+#define mulhdu(x,y) \
+({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+#else
+extern u64 mulhdu(u64, u64);
+#endif
+
+extern void smp_space_timers(unsigned int);
+
+extern unsigned mulhwu_scale_factor(unsigned, unsigned);
+extern void div128_by_32(u64 dividend_high, u64 dividend_low,
+                        unsigned divisor, struct div_result *dr);
+
+/* Used to store Processor Utilization register (purr) values */
+
+struct cpu_usage {
+        u64 current_tb;  /* Holds the current purr register values */
+};
+
+DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+
+#endif /* __KERNEL__ */
+#endif /* __PPC64_TIME_H */
similarity index 74%
rename from include/asm-ppc64/types.h
rename to include/asm-powerpc/types.h
index bf294c1..7d803cb 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PPC64_TYPES_H
-#define _PPC64_TYPES_H
+#ifndef _ASM_POWERPC_TYPES_H
+#define _ASM_POWERPC_TYPES_H
 
 #ifndef __ASSEMBLY__
 
  * 2 of the License, or (at your option) any later version.
  */
 
+#ifdef __powerpc64__
 typedef unsigned int umode_t;
+#else
+typedef unsigned short umode_t;
+#endif
 
 /*
  * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
@@ -32,8 +36,15 @@ typedef unsigned short __u16;
 typedef __signed__ int __s32;
 typedef unsigned int __u32;
 
+#ifdef __powerpc64__
 typedef __signed__ long __s64;
 typedef unsigned long __u64;
+#else
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+#endif /* __powerpc64__ */
 
 typedef struct {
        __u32 u[4];
@@ -45,7 +56,11 @@ typedef struct {
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
+#ifdef __powerpc64__
 #define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif
 
 #ifndef __ASSEMBLY__
 
@@ -58,8 +73,13 @@ typedef unsigned short u16;
 typedef signed int s32;
 typedef unsigned int u32;
 
+#ifdef __powerpc64__
 typedef signed long s64;
 typedef unsigned long u64;
+#else
+typedef signed long long s64;
+typedef unsigned long long u64;
+#endif
 
 typedef __vector128 vector128;
 
@@ -72,8 +92,13 @@ typedef struct {
        unsigned long env;
 } func_descr_t;
 
+#ifdef CONFIG_LBD
+typedef u64 sector_t;
+#define HAVE_SECTOR_T
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
 
-#endif /* _PPC64_TYPES_H */
+#endif /* _ASM_POWERPC_TYPES_H */
similarity index 90%
rename from include/asm-ppc/unistd.h
rename to include/asm-powerpc/unistd.h
index 3173ab3..c2d039e 100644 (file)
@@ -3,7 +3,13 @@
 
 /*
  * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
  */
+
 #define __NR_restart_syscall     0
 #define __NR_exit                1
 #define __NR_fork                2
 #define __NR_vfork             189
 #define __NR_ugetrlimit                190     /* SuS compliant getrlimit */
 #define __NR_readahead         191
+#ifndef __powerpc64__                  /* these are 32-bit only */
 #define __NR_mmap2             192
 #define __NR_truncate64                193
 #define __NR_ftruncate64       194
 #define __NR_stat64            195
 #define __NR_lstat64           196
 #define __NR_fstat64           197
+#endif
 #define __NR_pciconfig_read    198
 #define __NR_pciconfig_write   199
 #define __NR_pciconfig_iobase  200
 #define __NR_multiplexer       201
 #define __NR_getdents64                202
 #define __NR_pivot_root                203
+#ifndef __powerpc64__
 #define __NR_fcntl64           204
+#endif
 #define __NR_madvise           205
 #define __NR_mincore           206
 #define __NR_gettid            207
 #define __NR_sched_getaffinity 223
 /* 224 currently unused */
 #define __NR_tuxcall           225
+#ifndef __powerpc64__
 #define __NR_sendfile64                226
+#endif
 #define __NR_io_setup          227
 #define __NR_io_destroy                228
 #define __NR_io_getevents      229
 #define __NR_utimes            251
 #define __NR_statfs64          252
 #define __NR_fstatfs64         253
+#ifndef __powerpc64__
 #define __NR_fadvise64_64      254
+#endif
 #define __NR_rtas              255
 #define __NR_sys_debug_setcontext 256
 /* Number 257 is reserved for vserver */
 /* 258 currently unused */
-/* Number 259 is reserved for new sys_mbind */
-/* Number 260 is reserved for new sys_get_mempolicy */
-/* Number 261 is reserved for new sys_set_mempolicy */
+#define __NR_mbind             259
+#define __NR_get_mempolicy     260
+#define __NR_set_mempolicy     261
 #define __NR_mq_open           262
 #define __NR_mq_unlink         263
 #define __NR_mq_timedsend      264
 
 #define __NR_syscalls          278
 
-#define __NR(n)        #n
+#ifdef __KERNEL__
+#define __NR__exit __NR_exit
+#define NR_syscalls    __NR_syscalls
+#endif
+
+#ifndef __ASSEMBLY__
 
 /* On powerpc a system call basically clobbers the same registers like a
  * function call, with the exception of LR (which is needed for the
@@ -389,7 +408,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)       \
 {                                                                      \
        __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5);      \
 }
-
 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
 type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
 {                                                                      \
@@ -398,12 +416,13 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
 
 #ifdef __KERNEL__
 
-#define __NR__exit __NR_exit
-#define NR_syscalls    __NR_syscalls
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_GETHOSTNAME
@@ -423,23 +442,17 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
-
-/*
- * Forking from kernel space will result in the child getting a new,
- * empty kernel stack area.  Thus the child cannot access automatic
- * variables set in the parent unless they are in registers, and the
- * procedure where the fork was done cannot return to its caller in
- * the child.
- */
-
-#ifdef __KERNEL_SYSCALLS__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
+#ifdef CONFIG_PPC32
+#define __ARCH_WANT_OLD_STAT
+#endif
+#ifdef CONFIG_PPC64
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#endif
 
 /*
  * System call prototypes.
  */
+#ifdef __KERNEL_SYSCALLS__
 extern pid_t setsid(void);
 extern int write(int fd, const char *buf, off_t count);
 extern int read(int fd, char *buf, off_t count);
@@ -449,10 +462,13 @@ extern int execve(const char *file, char **argv, char **envp);
 extern int open(const char *file, int flag, int mode);
 extern int close(int fd);
 extern pid_t waitpid(pid_t pid, int *wait_stat, int options);
+#endif /* __KERNEL_SYSCALLS__ */
 
-unsigned long sys_mmap(unsigned long addr, size_t len,
-                       unsigned long prot, unsigned long flags,
-                       unsigned long fd, off_t offset);
+/*
+ * Functions that implement syscalls.
+ */
+unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
+                      unsigned long flags, unsigned long fd, off_t offset);
 unsigned long sys_mmap2(unsigned long addr, size_t len,
                        unsigned long prot, unsigned long flags,
                        unsigned long fd, unsigned long pgoff);
@@ -461,22 +477,19 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
                unsigned long a3, unsigned long a4, unsigned long a5,
                struct pt_regs *regs);
 int sys_clone(unsigned long clone_flags, unsigned long usp,
-             int __user *parent_tidp, void __user *child_threadptr,
-             int __user *child_tidp, int p6,
-             struct pt_regs *regs);
-int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
+               int __user *parent_tidp, void __user *child_threadptr,
+               int __user *child_tidp, int p6, struct pt_regs *regs);
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+               unsigned long p4, unsigned long p5, unsigned long p6,
                struct pt_regs *regs);
-int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+               unsigned long p4, unsigned long p5, unsigned long p6,
                struct pt_regs *regs);
 int sys_pipe(int __user *fildes);
 int sys_ptrace(long request, long pid, long addr, long data);
 struct sigaction;
-long sys_rt_sigaction(int sig,
-                     const struct sigaction __user *act,
-                     struct sigaction __user *oact,
-                     size_t sigsetsize);
-
-#endif /* __KERNEL_SYSCALLS__ */
+long sys_rt_sigaction(int sig, const struct sigaction __user *act,
+                     struct sigaction __user *oact, size_t sigsetsize);
 
 /*
  * "Conditional" syscalls
@@ -484,10 +497,14 @@ long sys_rt_sigaction(int sig,
  * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
  * but it doesn't work on all toolchains, so we just do it by hand
  */
-#ifndef cond_syscall
+#ifdef CONFIG_PPC32
 #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#else
+#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
 #endif
 
-#endif /* __KERNEL__ */
+#endif         /* __KERNEL__ */
+
+#endif         /* __ASSEMBLY__ */
 
 #endif /* _ASM_PPC_UNISTD_H_ */
similarity index 74%
rename from include/asm-ppc64/vga.h
rename to include/asm-powerpc/vga.h
index c098497..f8d350a 100644 (file)
@@ -1,16 +1,14 @@
+#ifndef _ASM_POWERPC_VGA_H_
+#define _ASM_POWERPC_VGA_H_
+
+#ifdef __KERNEL__
+
 /*
  *     Access to VGA videoram
  *
  *     (c) 1998 Martin Mares <mj@ucw.cz>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
 
 #include <asm/io.h>
 
@@ -42,9 +40,15 @@ static inline u16 scr_readw(volatile const u16 *addr)
 #endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
 
 extern unsigned long vgacon_remap_base;
+
+#ifdef __powerpc64__
 #define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
+#else
+#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
+#endif
 
 #define vga_readb(x) (*(x))
 #define vga_writeb(x,y) (*(y) = (x))
 
-#endif
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VGA_H_ */
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
new file mode 100644 (file)
index 0000000..ca5f332
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __PPC_XMON_H
+#define __PPC_XMON_H
+#ifdef __KERNEL__
+
+struct pt_regs;
+
+extern void xmon(struct pt_regs *excp);
+extern void xmon_printf(const char *fmt, ...);
+extern void xmon_init(int);
+
+#endif
+#endif
diff --git a/include/asm-ppc/a.out.h b/include/asm-ppc/a.out.h
deleted file mode 100644 (file)
index 8979a94..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __PPC_A_OUT_H__
-#define __PPC_A_OUT_H__
-
-/* grabbed from the intel stuff  */
-#define STACK_TOP TASK_SIZE
-
-
-struct exec
-{
-  unsigned long a_info;                /* Use macros N_MAGIC, etc for access */
-  unsigned a_text;             /* length of text, in bytes */
-  unsigned a_data;             /* length of data, in bytes */
-  unsigned a_bss;              /* length of uninitialized data area for file, in bytes */
-  unsigned a_syms;             /* length of symbol table data in file, in bytes */
-  unsigned a_entry;            /* start address */
-  unsigned a_trsize;           /* length of relocation info for text, in bytes */
-  unsigned a_drsize;           /* length of relocation info for data, in bytes */
-};
-
-
-#define N_TRSIZE(a)    ((a).a_trsize)
-#define N_DRSIZE(a)    ((a).a_drsize)
-#define N_SYMSIZE(a)   ((a).a_syms)
-
-
-#endif
diff --git a/include/asm-ppc/auxvec.h b/include/asm-ppc/auxvec.h
deleted file mode 100644 (file)
index 172358d..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __PPC_AUXVEC_H
-#define __PPC_AUXVEC_H
-
-/*
- * We need to put in some extra aux table entries to tell glibc what
- * the cache block size is, so it can use the dcbz instruction safely.
- */
-#define AT_DCACHEBSIZE         19
-#define AT_ICACHEBSIZE         20
-#define AT_UCACHEBSIZE         21
-/* A special ignored type value for PPC, for glibc compatibility.  */
-#define AT_IGNOREPPC           22
-
-#endif
diff --git a/include/asm-ppc/bug.h b/include/asm-ppc/bug.h
deleted file mode 100644 (file)
index 8b34fd6..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _PPC_BUG_H
-#define _PPC_BUG_H
-
-struct bug_entry {
-       unsigned long   bug_addr;
-       int             line;
-       const char      *file;
-       const char      *function;
-};
-
-/*
- * If this bit is set in the line number it means that the trap
- * is for WARN_ON rather than BUG or BUG_ON.
- */
-#define BUG_WARNING_TRAP       0x1000000
-
-#ifdef CONFIG_BUG
-#define BUG() do {                                                      \
-       __asm__ __volatile__(                                            \
-               "1:     twi 31,0,0\n"                                    \
-               ".section __bug_table,\"a\"\n\t"                         \
-               "       .long 1b,%0,%1,%2\n"                             \
-               ".previous"                                              \
-               : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
-} while (0)
-
-#define BUG_ON(x) do {                                                 \
-       if (!__builtin_constant_p(x) || (x)) {                          \
-               __asm__ __volatile__(                                   \
-                       "1:     twnei %0,0\n"                           \
-                       ".section __bug_table,\"a\"\n\t"                \
-                       "       .long 1b,%1,%2,%3\n"                    \
-                       ".previous"                                     \
-                       : : "r" (x), "i" (__LINE__), "i" (__FILE__),    \
-                           "i" (__FUNCTION__));                        \
-       }                                                               \
-} while (0)
-
-#define WARN_ON(x) do {                                                        \
-       if (!__builtin_constant_p(x) || (x)) {                          \
-               __asm__ __volatile__(                                   \
-                       "1:     twnei %0,0\n"                           \
-                       ".section __bug_table,\"a\"\n\t"                \
-                       "       .long 1b,%1,%2,%3\n"                    \
-                       ".previous"                                     \
-                       : : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
-                           "i" (__FILE__), "i" (__FUNCTION__));        \
-       }                                                               \
-} while (0)
-
-#define HAVE_ARCH_BUG
-#define HAVE_ARCH_BUG_ON
-#define HAVE_ARCH_WARN_ON
-#endif
-
-#include <asm-generic/bug.h>
-
-#endif
diff --git a/include/asm-ppc/byteorder.h b/include/asm-ppc/byteorder.h
deleted file mode 100644 (file)
index c63c81e..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _PPC_BYTEORDER_H
-#define _PPC_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-#ifdef __KERNEL__
-
-extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
-{
-       unsigned val;
-
-       __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-       return val;
-}
-
-extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
-{
-       __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
-{
-       unsigned val;
-
-       __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-       return val;
-}
-
-extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
-{
-       __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
-{
-       __u16 result;
-
-       __asm__("rlwimi %0,%2,8,16,23" : "=&r" (result) : "0" (value >> 8), "r" (value));
-       return result;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
-{
-       __u32 result;
-
-       __asm__("rlwimi %0,%2,24,16,23" : "=&r" (result) : "0" (value>>24), "r" (value));
-       __asm__("rlwimi %0,%2,8,8,15"   : "=&r" (result) : "0" (result),    "r" (value));
-       __asm__("rlwimi %0,%2,24,0,7"   : "=&r" (result) : "0" (result),    "r" (value));
-
-       return result;
-}
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
-
-#endif /* __KERNEL__ */
-
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __BYTEORDER_HAS_U64__
-#  define __SWAB_64_THRU_32__
-#endif
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _PPC_BYTEORDER_H */
index 38f2f1b..7a157d0 100644 (file)
@@ -9,21 +9,18 @@
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-#define        L1_CACHE_LINE_SIZE      16
-#define LG_L1_CACHE_LINE_SIZE  4
+#define L1_CACHE_SHIFT 4
 #define MAX_COPY_PREFETCH      1
 #elif defined(CONFIG_PPC64BRIDGE)
-#define L1_CACHE_LINE_SIZE     128
-#define LG_L1_CACHE_LINE_SIZE  7
+#define L1_CACHE_SHIFT 7
 #define MAX_COPY_PREFETCH      1
 #else
-#define        L1_CACHE_LINE_SIZE      32
-#define LG_L1_CACHE_LINE_SIZE  5
+#define L1_CACHE_SHIFT 5
 #define MAX_COPY_PREFETCH      4
 #endif
 
-#define        L1_CACHE_BYTES L1_CACHE_LINE_SIZE
-#define L1_CACHE_SHIFT LG_L1_CACHE_LINE_SIZE
+#define        L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
+
 #define        SMP_CACHE_BYTES L1_CACHE_BYTES
 #define L1_CACHE_SHIFT_MAX 7   /* largest L1 which this arch supports */
 
diff --git a/include/asm-ppc/cputable.h b/include/asm-ppc/cputable.h
deleted file mode 100644 (file)
index e17c492..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- *  include/asm-ppc/cputable.h
- *
- *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#ifndef __ASM_PPC_CPUTABLE_H
-#define __ASM_PPC_CPUTABLE_H
-
-/* Exposed to userland CPU features */
-#define PPC_FEATURE_32                 0x80000000
-#define PPC_FEATURE_64                 0x40000000
-#define PPC_FEATURE_601_INSTR          0x20000000
-#define PPC_FEATURE_HAS_ALTIVEC                0x10000000
-#define PPC_FEATURE_HAS_FPU            0x08000000
-#define PPC_FEATURE_HAS_MMU            0x04000000
-#define PPC_FEATURE_HAS_4xxMAC         0x02000000
-#define PPC_FEATURE_UNIFIED_CACHE      0x01000000
-#define PPC_FEATURE_HAS_SPE            0x00800000
-#define PPC_FEATURE_HAS_EFP_SINGLE     0x00400000
-#define PPC_FEATURE_HAS_EFP_DOUBLE     0x00200000
-#define PPC_FEATURE_NO_TB              0x00100000
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/* This structure can grow, it's real size is used by head.S code
- * via the mkdefs mecanism.
- */
-struct cpu_spec;
-
-typedef        void (*cpu_setup_t)(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-
-struct cpu_spec {
-       /* CPU is matched via (PVR & pvr_mask) == pvr_value */
-       unsigned int    pvr_mask;
-       unsigned int    pvr_value;
-
-       char            *cpu_name;
-       unsigned int    cpu_features;           /* Kernel features */
-       unsigned int    cpu_user_features;      /* Userland features */
-
-       /* cache line sizes */
-       unsigned int    icache_bsize;
-       unsigned int    dcache_bsize;
-
-       /* number of performance monitor counters */
-       unsigned int    num_pmcs;
-
-       /* this is called to initialize various CPU bits like L1 cache,
-        * BHT, SPD, etc... from head.S before branching to identify_machine
-        */
-       cpu_setup_t     cpu_setup;
-};
-
-extern struct cpu_spec         cpu_specs[];
-extern struct cpu_spec         *cur_cpu_spec[];
-
-static inline unsigned int cpu_has_feature(unsigned int feature)
-{
-       return cur_cpu_spec[0]->cpu_features & feature;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/* CPU kernel features */
-#define CPU_FTR_SPLIT_ID_CACHE         0x00000001
-#define CPU_FTR_L2CR                   0x00000002
-#define CPU_FTR_SPEC7450               0x00000004
-#define CPU_FTR_ALTIVEC                        0x00000008
-#define CPU_FTR_TAU                    0x00000010
-#define CPU_FTR_CAN_DOZE               0x00000020
-#define CPU_FTR_USE_TB                 0x00000040
-#define CPU_FTR_604_PERF_MON           0x00000080
-#define CPU_FTR_601                    0x00000100
-#define CPU_FTR_HPTE_TABLE             0x00000200
-#define CPU_FTR_CAN_NAP                        0x00000400
-#define CPU_FTR_L3CR                   0x00000800
-#define CPU_FTR_L3_DISABLE_NAP         0x00001000
-#define CPU_FTR_NAP_DISABLE_L2_PR      0x00002000
-#define CPU_FTR_DUAL_PLL_750FX         0x00004000
-#define CPU_FTR_NO_DPM                 0x00008000
-#define CPU_FTR_HAS_HIGH_BATS          0x00010000
-#define CPU_FTR_NEED_COHERENT          0x00020000
-#define CPU_FTR_NO_BTIC                        0x00040000
-#define CPU_FTR_BIG_PHYS               0x00080000
-
-#ifdef __ASSEMBLY__
-
-#define BEGIN_FTR_SECTION              98:
-
-#define END_FTR_SECTION(msk, val)              \
-99:                                            \
-       .section __ftr_fixup,"a";               \
-       .align 2;                               \
-       .long msk;                              \
-       .long val;                              \
-       .long 98b;                              \
-       .long 99b;                              \
-       .previous
-
-#else
-
-#define BEGIN_FTR_SECTION              "98:\n"
-#define END_FTR_SECTION(msk, val)              \
-"99:\n"                                                \
-"      .section __ftr_fixup,\"a\";\n"          \
-"      .align 2;\n"                            \
-"      .long "#msk";\n"                        \
-"      .long "#val";\n"                        \
-"      .long 98b;\n"                           \
-"      .long 99b;\n"                           \
-"      .previous\n"
-
-
-#endif /* __ASSEMBLY__ */
-
-#define END_FTR_SECTION_IFSET(msk)     END_FTR_SECTION((msk), (msk))
-#define END_FTR_SECTION_IFCLR(msk)     END_FTR_SECTION((msk), 0)
-
-#endif /* __ASM_PPC_CPUTABLE_H */
-#endif /* __KERNEL__ */
-
diff --git a/include/asm-ppc/elf.h b/include/asm-ppc/elf.h
deleted file mode 100644 (file)
index c25cc35..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-#ifndef __PPC_ELF_H
-#define __PPC_ELF_H
-
-/*
- * ELF register definitions..
- */
-#include <asm/types.h>
-#include <asm/ptrace.h>
-#include <asm/cputable.h>
-#include <asm/auxvec.h>
-
-/* PowerPC relocations defined by the ABIs */
-#define R_PPC_NONE             0
-#define R_PPC_ADDR32           1       /* 32bit absolute address */
-#define R_PPC_ADDR24           2       /* 26bit address, 2 bits ignored.  */
-#define R_PPC_ADDR16           3       /* 16bit absolute address */
-#define R_PPC_ADDR16_LO                4       /* lower 16bit of absolute address */
-#define R_PPC_ADDR16_HI                5       /* high 16bit of absolute address */
-#define R_PPC_ADDR16_HA                6       /* adjusted high 16bit */
-#define R_PPC_ADDR14           7       /* 16bit address, 2 bits ignored */
-#define R_PPC_ADDR14_BRTAKEN   8
-#define R_PPC_ADDR14_BRNTAKEN  9
-#define R_PPC_REL24            10      /* PC relative 26 bit */
-#define R_PPC_REL14            11      /* PC relative 16 bit */
-#define R_PPC_REL14_BRTAKEN    12
-#define R_PPC_REL14_BRNTAKEN   13
-#define R_PPC_GOT16            14
-#define R_PPC_GOT16_LO         15
-#define R_PPC_GOT16_HI         16
-#define R_PPC_GOT16_HA         17
-#define R_PPC_PLTREL24         18
-#define R_PPC_COPY             19
-#define R_PPC_GLOB_DAT         20
-#define R_PPC_JMP_SLOT         21
-#define R_PPC_RELATIVE         22
-#define R_PPC_LOCAL24PC                23
-#define R_PPC_UADDR32          24
-#define R_PPC_UADDR16          25
-#define R_PPC_REL32            26
-#define R_PPC_PLT32            27
-#define R_PPC_PLTREL32         28
-#define R_PPC_PLT16_LO         29
-#define R_PPC_PLT16_HI         30
-#define R_PPC_PLT16_HA         31
-#define R_PPC_SDAREL16         32
-#define R_PPC_SECTOFF          33
-#define R_PPC_SECTOFF_LO       34
-#define R_PPC_SECTOFF_HI       35
-#define R_PPC_SECTOFF_HA       36
-/* Keep this the last entry.  */
-#define R_PPC_NUM              37
-
-#define ELF_NGREG      48      /* includes nip, msr, lr, etc. */
-#define ELF_NFPREG     33      /* includes fpscr */
-#define ELF_NVRREG     33      /* includes vscr */
-#define ELF_NEVRREG    34      /* includes acc (as 2) */
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_ARCH       EM_PPC
-#define ELF_CLASS      ELFCLASS32
-#define ELF_DATA       ELFDATA2MSB
-
-/* General registers */
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-/* Floating point registers */
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/* Altivec registers */
-typedef __vector128 elf_vrreg_t;
-typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
-
-#ifdef __KERNEL__
-
-struct task_struct;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-
-#define elf_check_arch(x) ((x)->e_machine == EM_PPC)
-
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE         (0x08000000)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE      4096
-
-#define ELF_CORE_COPY_REGS(gregs, regs)                                \
-       memcpy((gregs), (regs), sizeof(struct pt_regs));        \
-       memset((char *)(gregs) + sizeof(struct pt_regs), 0,     \
-              sizeof(elf_gregset_t) - sizeof(struct pt_regs));
-
-#define ELF_CORE_COPY_TASK_REGS(t, elfregs)                    \
-       ((t)->thread.regs?                                      \
-        ({ ELF_CORE_COPY_REGS((elfregs), (t)->thread.regs); 1; }): 0)
-
-extern int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpu);
-#define ELF_CORE_COPY_FPREGS(t, fpu)   dump_task_fpu((t), (fpu))
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this cpu supports.  This could be done in userspace,
-   but it's not easy, and we've already done it here.  */
-
-#define ELF_HWCAP      (cur_cpu_spec[0]->cpu_user_features)
-
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
-
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
-
-#define ELF_PLATFORM   (NULL)
-
-#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-
-extern int dcache_bsize;
-extern int icache_bsize;
-extern int ucache_bsize;
-
-/*
- * The requirements here are:
- * - keep the final alignment of sp (sp & 0xf)
- * - make sure the 32-bit value at the first 16 byte aligned position of
- *   AUXV is greater than 16 for glibc compatibility.
- *   AT_IGNOREPPC is used for that.
- * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
- *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
- */
-#define ARCH_DLINFO                                                    \
-do {                                                                   \
-       /* Handle glibc compatibility. */                               \
-       NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                        \
-       NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                        \
-       /* Cache size items */                                          \
-       NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);                      \
-       NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);                      \
-       NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);                      \
- } while (0)
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/include/asm-ppc/hw_irq.h b/include/asm-ppc/hw_irq.h
deleted file mode 100644 (file)
index 47dc799..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifdef __KERNEL__
-#ifndef _PPC_HW_IRQ_H
-#define _PPC_HW_IRQ_H
-
-#include <asm/ptrace.h>
-#include <asm/reg.h>
-
-extern void timer_interrupt(struct pt_regs *);
-
-#define INLINE_IRQS
-
-#define irqs_disabled()        ((mfmsr() & MSR_EE) == 0)
-
-#ifdef INLINE_IRQS
-
-static inline void local_irq_disable(void)
-{
-       unsigned long msr;
-       msr = mfmsr();
-       mtmsr(msr & ~MSR_EE);
-       __asm__ __volatile__("": : :"memory");
-}
-
-static inline void local_irq_enable(void)
-{
-       unsigned long msr;
-       __asm__ __volatile__("": : :"memory");
-       msr = mfmsr();
-       mtmsr(msr | MSR_EE);
-}
-
-static inline void local_irq_save_ptr(unsigned long *flags)
-{
-       unsigned long msr;
-       msr = mfmsr();
-       *flags = msr;
-       mtmsr(msr & ~MSR_EE);
-       __asm__ __volatile__("": : :"memory");
-}
-
-#define local_save_flags(flags)                ((flags) = mfmsr())
-#define local_irq_save(flags)          local_irq_save_ptr(&flags)
-#define local_irq_restore(flags)       mtmsr(flags)
-
-#else
-
-extern void local_irq_enable(void);
-extern void local_irq_disable(void);
-extern void local_irq_restore(unsigned long);
-extern void local_save_flags_ptr(unsigned long *);
-
-#define local_save_flags(flags) local_save_flags_ptr(&flags)
-#define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})
-
-#endif
-
-extern void do_lost_interrupts(unsigned long);
-
-#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
-#define unmask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->enable) irq_desc[irq].handler->enable(irq);})
-#define ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
-
-/* Should we handle this via lost interrupts and IPIs or should we don't care like
- * we do now ? --BenH.
- */
-struct hw_interrupt_type;
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-
-
-#endif /* _PPC_HW_IRQ_H */
-#endif /* __KERNEL__ */
index 94d8399..f7f614d 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/synch.h>
 #include <asm/mmu.h>
 
 #define SIO_CONFIG_RA  0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
 #define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_bus(page)      (page_to_phys(page) + PCI_DRAM_OFFSET)
 
-/*
- * Enforce In-order Execution of I/O:
- * Acts as a barrier to ensure all previous I/O accesses have
- * completed before any further ones are issued.
- */
-extern inline void eieio(void)
-{
-       __asm__ __volatile__ ("eieio" : : : "memory");
-}
-
 /* Enforce in-order execution of data I/O.
  * No distinction between read/write on PPC; use eieio for all three.
  */
diff --git a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h
deleted file mode 100644 (file)
index 6d6fc78..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_PPC_SYNC_PAGE,
-       KM_PPC_SYNC_ICACHE,
-       KM_TYPE_NR
-};
-
-#endif
-#endif /* __KERNEL__ */
index 1d4ab70..6c6d23a 100644 (file)
@@ -167,7 +167,7 @@ extern sys_ctrler_t sys_ctrler;
 
 #ifdef CONFIG_SMP
 struct smp_ops_t {
-       void  (*message_pass)(int target, int msg, unsigned long data, int wait);
+       void  (*message_pass)(int target, int msg);
        int   (*probe)(void);
        void  (*kick_cpu)(int nr);
        void  (*setup_cpu)(int nr);
index afe26ff..4f152cc 100644 (file)
@@ -164,13 +164,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
 {
 #ifdef CONFIG_ALTIVEC
-       asm volatile (
- BEGIN_FTR_SECTION
-       "dssall;\n"
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+       asm volatile ("dssall;\n"
 #ifndef CONFIG_POWER4
         "sync;\n" /* G4 needs a sync here, G5 apparently not */
 #endif
- END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         : : );
 #endif /* CONFIG_ALTIVEC */
 
index 7848aa6..ec2f466 100644 (file)
@@ -58,8 +58,7 @@ extern int openpic_get_irq(struct pt_regs *regs);
 extern void openpic_reset_processor_phys(u_int cpumask);
 extern void openpic_setup_ISU(int isu_num, unsigned long addr);
 extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask);
-extern void smp_openpic_message_pass(int target, int msg, unsigned long data,
-                                    int wait);
+extern void smp_openpic_message_pass(int target, int msg);
 extern void openpic_set_k2_cascade(int irq);
 extern void openpic_set_priority(u_int pri);
 extern u_int openpic_get_priority(void);
index 4789dc0..fc44f7c 100644 (file)
@@ -34,6 +34,17 @@ typedef unsigned long pte_basic_t;
 #define PTE_FMT                "%.8lx"
 #endif
 
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size)   (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)       _ALIGN(addr, PAGE_SIZE)
+
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
@@ -76,13 +87,6 @@ typedef unsigned long pgprot_t;
 
 #endif
 
-
-/* align addr on a size boundary - adjust address up if needed -- Cort */
-#define _ALIGN(addr,size)      (((addr)+(size)-1)&(~((size)-1)))
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)       (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
 struct page;
 extern void clear_pages(void *page, int order);
 static inline void clear_page(void *page) { clear_pages(page, 0); }
index ffa4234..e58c78f 100644 (file)
@@ -79,6 +79,11 @@ struct pci_controller {
        struct resource mem_space;
 };
 
+static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
+{
+       return bus->sysdata;
+}
+
 /* These are used for config access before all the PCI probing
    has been done. */
 int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
index 9dd06cd..643740d 100644 (file)
@@ -24,9 +24,9 @@ struct pci_dev;
  * Set this to 1 if you want the kernel to re-assign all PCI
  * bus numbers
  */
-extern int pci_assign_all_busses;
+extern int pci_assign_all_buses;
 
-#define pcibios_assign_all_busses()    (pci_assign_all_busses)
+#define pcibios_assign_all_busses()    (pci_assign_all_buses)
 #define pcibios_scan_all_fns(a, b)     0
 
 #define PCIBIOS_MIN_IO         0x1000
diff --git a/include/asm-ppc/perfmon.h b/include/asm-ppc/perfmon.h
deleted file mode 100644 (file)
index 5e7a89c..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __PERFMON_H
-#define __PERFMON_H
-
-extern void (*perf_irq)(struct pt_regs *);
-
-int request_perfmon_irq(void (*handler)(struct pt_regs *));
-void free_perfmon_irq(void);
-
-#ifdef CONFIG_FSL_BOOKE
-void init_pmc_stop(int ctr);
-void set_pmc_event(int ctr, int event);
-void set_pmc_user_kernel(int ctr, int user, int kernel);
-void set_pmc_marked(int ctr, int mark0, int mark1);
-void pmc_start_ctr(int ctr, int enable);
-void pmc_start_ctrs(int enable);
-void pmc_stop_ctrs(void);
-void dump_pmcs(void);
-
-extern struct op_ppc32_model op_model_fsl_booke;
-#endif
-
-#endif /* __PERFMON_H */
diff --git a/include/asm-ppc/posix_types.h b/include/asm-ppc/posix_types.h
deleted file mode 100644 (file)
index a14a82a..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _PPC_POSIX_TYPES_H
-#define _PPC_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc.  Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long  __kernel_ino_t;
-typedef unsigned int   __kernel_mode_t;
-typedef unsigned short __kernel_nlink_t;
-typedef long           __kernel_off_t;
-typedef int            __kernel_pid_t;
-typedef unsigned int   __kernel_uid_t;
-typedef unsigned int   __kernel_gid_t;
-typedef unsigned int   __kernel_size_t;
-typedef int            __kernel_ssize_t;
-typedef long           __kernel_ptrdiff_t;
-typedef long           __kernel_time_t;
-typedef long           __kernel_suseconds_t;
-typedef long           __kernel_clock_t;
-typedef int            __kernel_timer_t;
-typedef int            __kernel_clockid_t;
-typedef int            __kernel_daddr_t;
-typedef char *         __kernel_caddr_t;
-typedef short             __kernel_ipc_pid_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int   __kernel_uid32_t;
-typedef unsigned int   __kernel_gid32_t;
-
-typedef unsigned int   __kernel_old_uid_t;
-typedef unsigned int   __kernel_old_gid_t;
-typedef unsigned int   __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long      __kernel_loff_t;
-#endif
-
-typedef struct {
-       int     val[2];
-} __kernel_fsid_t;
-
-#ifndef __GNUC__
-
-#define        __FD_SET(d, set)        ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define        __FD_CLR(d, set)        ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define        __FD_ISSET(d, set)      ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
-#define        __FD_ZERO(set)  \
-  ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
-
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
-    || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-       unsigned long _tmp = fd / __NFDBITS;
-       unsigned long _rem = fd % __NFDBITS;
-       fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-       unsigned long _tmp = fd / __NFDBITS;
-       unsigned long _rem = fd % __NFDBITS;
-       fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
-{
-       unsigned long _tmp = fd / __NFDBITS;
-       unsigned long _rem = fd % __NFDBITS;
-       return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
-       unsigned int *tmp = (unsigned int *)p->fds_bits;
-       int i;
-
-       if (__builtin_constant_p(__FDSET_LONGS)) {
-               switch (__FDSET_LONGS) {
-                       case 8:
-                               tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
-                               tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
-                               return;
-               }
-       }
-       i = __FDSET_LONGS;
-       while (i) {
-               i--;
-               *tmp = 0;
-               tmp++;
-       }
-}
-
-#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
-#endif /* __GNUC__ */
-#endif /* _PPC_POSIX_TYPES_H */
index 7043c16..c34fb4e 100644 (file)
@@ -57,7 +57,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 
 #define force_successful_syscall_return()   \
        do { \
-               current_thread_info()->local_flags |= _TIFL_FORCE_NOERROR; \
+               current_thread_info()->syscall_noerror = 1; \
        } while(0)
 
 /*
diff --git a/include/asm-ppc/rwsem.h b/include/asm-ppc/rwsem.h
deleted file mode 100644 (file)
index 3e738f4..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
- * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-#ifndef _PPC_RWSEM_H
-#define _PPC_RWSEM_H
-
-#ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-       /* XXX this should be able to be an atomic_t  -- paulus */
-       signed long             count;
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
-#if RWSEM_DEBUG
-       int                     debug;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT     /* */
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-         LIST_HEAD_INIT((name).wait_list) \
-         __RWSEM_DEBUG_INIT }
-
-#define DECLARE_RWSEM(name)            \
-       struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-       sem->debug = 0;
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
-               smp_wmb();
-       else
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       while ((tmp = sem->count) >= 0) {
-               if (tmp == cmpxchg(&sem->count, tmp,
-                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       smp_wmb();
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                               (atomic_t *)(&sem->count));
-       if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-               smp_wmb();
-       else
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       smp_wmb();
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       smp_wmb();
-       tmp = atomic_dec_return((atomic_t *)(&sem->count));
-       if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       smp_wmb();
-       if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                             (atomic_t *)(&sem->count)) < 0)
-               rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-       atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       smp_wmb();
-       tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-       if (tmp < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-       smp_mb();
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-#endif /* _PPC_RWSEM_XADD_H */
diff --git a/include/asm-ppc/seccomp.h b/include/asm-ppc/seccomp.h
deleted file mode 100644 (file)
index 666c4da..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_SECCOMP_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-ppc/sections.h b/include/asm-ppc/sections.h
deleted file mode 100644 (file)
index ba8f43a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _PPC_SECTIONS_H
-#define _PPC_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#define __pmac __attribute__ ((__section__ (".pmac.text")))
-#define __pmacdata __attribute__ ((__section__ (".pmac.data")))
-#define __pmacfunc(__argpmac) \
-       __argpmac __pmac; \
-       __argpmac
-       
-#define __prep __attribute__ ((__section__ (".prep.text")))
-#define __prepdata __attribute__ ((__section__ (".prep.data")))
-#define __prepfunc(__argprep) \
-       __argprep __prep; \
-       __argprep
-
-#define __chrp __attribute__ ((__section__ (".chrp.text")))
-#define __chrpdata __attribute__ ((__section__ (".chrp.data")))
-#define __chrpfunc(__argchrp) \
-       __argchrp __chrp; \
-       __argchrp
-
-/* this is actually just common chrp/pmac code, not OF code -- Cort */
-#define __openfirmware __attribute__ ((__section__ (".openfirmware.text")))
-#define __openfirmwaredata __attribute__ ((__section__ (".openfirmware.data")))
-#define __openfirmwarefunc(__argopenfirmware) \
-       __argopenfirmware __openfirmware; \
-       __argopenfirmware
-       
-#endif /* _PPC_SECTIONS_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h
deleted file mode 100644 (file)
index 89e6e73..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _PPC_SEMAPHORE_H
-#define _PPC_SEMAPHORE_H
-
-/*
- * Swiped from asm-sparc/semaphore.h and modified
- * -- Cort (cort@cs.nmt.edu)
- *
- * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
- * -- Ani Joshi (ajoshi@unixbox.com)
- *
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-       /*
-        * Note that any negative value of count is equivalent to 0,
-        * but additionally indicates that some process(es) might be
-        * sleeping on `wait'.
-        */
-       atomic_t count;
-       wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)                               \
-{                                                                      \
-       .count          = ATOMIC_INIT(n),                               \
-       .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
-}
-
-#define __MUTEX_INITIALIZER(name) \
-       __SEMAPHORE_INITIALIZER(name, 1)
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
-       struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name)            __DECLARE_SEMAPHORE_GENERIC(name, 1)
-#define DECLARE_MUTEX_LOCKED(name)     __DECLARE_SEMAPHORE_GENERIC(name, 0)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-       atomic_set(&sem->count, val);
-       init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-       sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-       sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int  __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-extern inline void down(struct semaphore * sem)
-{
-       might_sleep();
-
-       /*
-        * Try to get the semaphore, take the slow path if we fail.
-        */
-       if (atomic_dec_return(&sem->count) < 0)
-               __down(sem);
-       smp_wmb();
-}
-
-extern inline int down_interruptible(struct semaphore * sem)
-{
-       int ret = 0;
-
-       might_sleep();
-
-       if (atomic_dec_return(&sem->count) < 0)
-               ret = __down_interruptible(sem);
-       smp_wmb();
-       return ret;
-}
-
-extern inline int down_trylock(struct semaphore * sem)
-{
-       int ret;
-
-       ret = atomic_dec_if_positive(&sem->count) < 0;
-       smp_wmb();
-       return ret;
-}
-
-extern inline void up(struct semaphore * sem)
-{
-       smp_wmb();
-       if (atomic_inc_return(&sem->count) <= 0)
-               __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_PPC_SEMAPHORE_H) */
index 829481c..063d7de 100644 (file)
@@ -35,6 +35,7 @@ extern cpumask_t cpu_possible_map;
 extern unsigned long smp_proc_in_lock[];
 extern volatile unsigned long cpu_callin_map[];
 extern int smp_tb_synchronized;
+extern struct smp_ops_t *smp_ops;
 
 extern void smp_send_tlb_invalidate(int);
 extern void smp_send_xmon_break(int cpu);
@@ -45,30 +46,21 @@ extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern void cpu_die(void) __attribute__((noreturn));
 
-#define NO_PROC_ID             0xFF            /* No processor magic marker */
-#define PROC_CHANGE_PENALTY    20
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern int __cpu_up(unsigned int cpu);
 
 extern int smp_hw_index[];
-#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
-
-struct klock_info_struct {
-       unsigned long kernel_flag;
-       unsigned char akp;
-};
-
-extern struct klock_info_struct klock_info;
-#define KLOCK_HELD       0xffffffff
-#define KLOCK_CLEAR      0x0
+#define hard_smp_processor_id()        (smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
 
 #endif /* __ASSEMBLY__ */
 
 #else /* !(CONFIG_SMP) */
 
 static inline void cpu_die(void) { }
+#define get_hard_smp_processor_id(cpu) 0
+#define hard_smp_processor_id() 0
 
 #endif /* !(CONFIG_SMP) */
 
index 20edcf2..5c64b75 100644 (file)
@@ -9,7 +9,7 @@
  * (the type definitions are in asm/raw_spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_is_locked(x)                ((x)->slock != 0)
 #define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -31,17 +31,17 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
-       : "r"(&lock->lock), "r"(1)
+       : "r"(&lock->slock), "r"(1)
        : "cr0", "memory");
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__("eieio     # __raw_spin_unlock": : :"memory");
-       lock->lock = 0;
+       lock->slock = 0;
 }
 
-#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
+#define __raw_spin_trylock(l) (!test_and_set_bit(0,(volatile unsigned long *)(&(l)->slock)))
 
 /*
  * Read-write spinlocks, allowing multiple readers
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h
deleted file mode 100644 (file)
index 7919ccc..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_SPINLOCK_TYPES_H
-#define __ASM_SPINLOCK_TYPES_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-typedef struct {
-       volatile unsigned long lock;
-} raw_spinlock_t;
-
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
-
-typedef struct {
-       volatile signed int lock;
-} raw_rwlock_t;
-
-#define __RAW_RW_LOCK_UNLOCKED         { 0 }
-
-#endif
diff --git a/include/asm-ppc/statfs.h b/include/asm-ppc/statfs.h
deleted file mode 100644 (file)
index 807c699..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _PPC_STATFS_H
-#define _PPC_STATFS_H
-
-#include <asm-generic/statfs.h>
-#endif
-
-
-
index d754ab5..1f31078 100644 (file)
@@ -74,21 +74,42 @@ extern void read_rtc_time(void);
 extern void pmac_find_display(void);
 extern void giveup_fpu(struct task_struct *);
 extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
 extern void giveup_altivec(struct task_struct *);
 extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
 extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 extern int call_rtas(const char *, int, int, unsigned long *, ...);
 extern void cacheable_memzero(void *p, unsigned int nb);
 extern void *cacheable_memcpy(void *, const void *, unsigned int);
 extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
 extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-extern void die(const char *, struct pt_regs *, long);
+extern int die(const char *, struct pt_regs *, long);
 extern void _exception(int, struct pt_regs *, int, unsigned long);
+void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+
 #ifdef CONFIG_BOOKE_WDT
 extern u32 booke_wdt_enabled;
 extern u32 booke_wdt_period;
diff --git a/include/asm-ppc/thread_info.h b/include/asm-ppc/thread_info.h
deleted file mode 100644 (file)
index 27903db..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/* thread_info.h: PPC low-level thread information
- * adapted from the i386 version by Paul Mackerras
- *
- * Copyright (C) 2002  David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-/*
- * low level task data.
- * If you change this, change the TI_* offsets below to match.
- */
-struct thread_info {
-       struct task_struct      *task;          /* main task structure */
-       struct exec_domain      *exec_domain;   /* execution domain */
-       unsigned long           flags;          /* low level flags */
-       unsigned long           local_flags;    /* non-racy flags */
-       int                     cpu;            /* cpu we're on */
-       int                     preempt_count;  /* 0 => preemptable,
-                                                  <0 => BUG */
-       struct restart_block    restart_block;
-};
-
-#define INIT_THREAD_INFO(tsk)                  \
-{                                              \
-       .task =         &tsk,                   \
-       .exec_domain =  &default_exec_domain,   \
-       .flags =        0,                      \
-       .local_flags =  0,                      \
-       .cpu =          0,                      \
-       .preempt_count = 1,                     \
-       .restart_block = {                      \
-               .fn = do_no_restart_syscall,    \
-       },                                      \
-}
-
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
-/*
- * macros/functions for gaining access to the thread information structure
- */
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-       struct thread_info *ti;
-       __asm__("rlwinm %0,1,0,0,18" : "=r"(ti));
-       return ti;
-}
-
-/* thread information allocation */
-#define alloc_thread_info(tsk) ((struct thread_info *) \
-                               __get_free_pages(GFP_KERNEL, 1))
-#define free_thread_info(ti)   free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
-#endif /* __ASSEMBLY__ */
-
-/*
- * Size of kernel stack for each process.
- */
-#define THREAD_SIZE            8192    /* 2 pages */
-
-#define PREEMPT_ACTIVE         0x10000000
-
-/*
- * thread information flag bit numbers
- */
-#define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_NOTIFY_RESUME      1       /* resumption notification requested */
-#define TIF_SIGPENDING         2       /* signal pending */
-#define TIF_NEED_RESCHED       3       /* rescheduling necessary */
-#define TIF_POLLING_NRFLAG     4       /* true if poll_idle() is polling
-                                          TIF_NEED_RESCHED */
-#define TIF_MEMDIE             5
-#define TIF_SYSCALL_AUDIT       6       /* syscall auditing active */
-#define TIF_SECCOMP             7      /* secure computing */
-
-/* as above, but as bit values */
-#define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
-#define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
-#define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
-#define _TIF_SYSCALL_AUDIT      (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP            (1<<TIF_SECCOMP)
-
-#define _TIF_SYSCALL_T_OR_A     (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
-
-/*
- * Non racy (local) flags bit numbers
- */
-#define TIFL_FORCE_NOERROR     0       /* don't return error from current
-                                          syscall even if result < 0 */
-
-/* as above, but as bit values */
-#define _TIFL_FORCE_NOERROR    (1<<TIFL_FORCE_NOERROR)
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-ppc/types.h b/include/asm-ppc/types.h
deleted file mode 100644 (file)
index 77dc24d..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _PPC_TYPES_H
-#define _PPC_TYPES_H
-
-#ifndef __ASSEMBLY__
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-#endif
-
-typedef struct {
-       __u32 u[4];
-} __vector128;
-
-/*
- * XXX allowed outside of __KERNEL__ for now, until glibc gets
- * a proper set of asm headers of its own.  -- paulus
- */
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#define BITS_PER_LONG 32
-
-#ifndef __ASSEMBLY__
-
-#include <linux/config.h>
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-
-typedef __vector128 vector128;
-
-/* DMA addresses are 32-bits wide */
-typedef u32 dma_addr_t;
-typedef u64 dma64_addr_t;
-
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/include/asm-ppc/vga.h b/include/asm-ppc/vga.h
deleted file mode 100644 (file)
index c586473..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *     Access to VGA videoram
- *
- *     (c) 1998 Martin Mares <mj@ucw.cz>
- */
-
-#ifdef __KERNEL__
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-#include <asm/io.h>
-
-#include <linux/config.h>
-
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
-
-#define VT_BUF_HAVE_RW
-/*
- *  These are only needed for supporting VGA or MDA text mode, which use little
- *  endian byte ordering.
- *  In other cases, we can optimize by using native byte ordering and
- *  <linux/vt_buffer.h> has already done the right job for us.
- */
-
-extern inline void scr_writew(u16 val, volatile u16 *addr)
-{
-    st_le16(addr, val);
-}
-
-extern inline u16 scr_readw(volatile const u16 *addr)
-{
-    return ld_le16(addr);
-}
-
-#define VT_BUF_HAVE_MEMCPYW
-#define scr_memcpyw    memcpy
-
-#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
-
-extern unsigned long vgacon_remap_base;
-#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
-#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/xmon.h b/include/asm-ppc/xmon.h
deleted file mode 100644 (file)
index 042b83e..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __PPC_XMON_H
-#define __PPC_XMON_H
-#ifdef __KERNEL__
-
-struct pt_regs;
-
-extern void xmon(struct pt_regs *excp);
-extern void xmon_printf(const char *fmt, ...);
-extern void xmon_map_scc(void);
-extern int xmon_bpt(struct pt_regs *regs);
-extern int xmon_sstep(struct pt_regs *regs);
-extern int xmon_iabr_match(struct pt_regs *regs);
-extern int xmon_dabr_match(struct pt_regs *regs);
-extern void (*xmon_fault_handler)(struct pt_regs *regs);
-
-#endif
-#endif
index 84c24d4..dc3fc3f 100644 (file)
@@ -63,4 +63,11 @@ static inline unsigned long phys_to_abs(unsigned long pa)
 #define virt_to_abs(va) phys_to_abs(__pa(va))
 #define abs_to_virt(aa) __va(aa)
 
+/*
+ * Converts Virtual Address to Real Address for
+ * Legacy iSeries Hypervisor calls
+ */
+#define iseries_hv_addr(virtaddr)      \
+       (0x8000000000000000 | virt_to_abs(virtaddr))
+
 #endif /* _ABS_ADDR_H */
diff --git a/include/asm-ppc64/atomic.h b/include/asm-ppc64/atomic.h
deleted file mode 100644 (file)
index 0e5f25e..0000000
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * PowerPC64 atomic operations
- *
- * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_PPC64_ATOMIC_H_ 
-#define _ASM_PPC64_ATOMIC_H_
-
-#include <asm/memory.h>
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v)         ((v)->counter)
-#define atomic_set(v,i)                (((v)->counter) = (i))
-
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%3         # atomic_add\n\
-       add     %0,%2,%0\n\
-       stwcx.  %0,0,%3\n\
-       bne-    1b"
-       : "=&r" (t), "=m" (v->counter)
-       : "r" (a), "r" (&v->counter), "m" (v->counter)
-       : "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       EIEIO_ON_SMP
-"1:    lwarx   %0,0,%2         # atomic_add_return\n\
-       add     %0,%1,%0\n\
-       stwcx.  %0,0,%2\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       : "=&r" (t)
-       : "r" (a), "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
-}
-
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%3         # atomic_sub\n\
-       subf    %0,%2,%0\n\
-       stwcx.  %0,0,%3\n\
-       bne-    1b"
-       : "=&r" (t), "=m" (v->counter)
-       : "r" (a), "r" (&v->counter), "m" (v->counter)
-       : "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       EIEIO_ON_SMP
-"1:    lwarx   %0,0,%2         # atomic_sub_return\n\
-       subf    %0,%1,%0\n\
-       stwcx.  %0,0,%2\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       : "=&r" (t)
-       : "r" (a), "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
-}
-
-static __inline__ void atomic_inc(atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%2         # atomic_inc\n\
-       addic   %0,%0,1\n\
-       stwcx.  %0,0,%2\n\
-       bne-    1b"
-       : "=&r" (t), "=m" (v->counter)
-       : "r" (&v->counter), "m" (v->counter)
-       : "cc");
-}
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       EIEIO_ON_SMP
-"1:    lwarx   %0,0,%1         # atomic_inc_return\n\
-       addic   %0,%0,1\n\
-       stwcx.  %0,0,%1\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%2         # atomic_dec\n\
-       addic   %0,%0,-1\n\
-       stwcx.  %0,0,%2\n\
-       bne-    1b"
-       : "=&r" (t), "=m" (v->counter)
-       : "r" (&v->counter), "m" (v->counter)
-       : "cc");
-}
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       EIEIO_ON_SMP
-"1:    lwarx   %0,0,%1         # atomic_dec_return\n\
-       addic   %0,%0,-1\n\
-       stwcx.  %0,0,%1\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
-}
-
-#define atomic_sub_and_test(a, v)      (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v)         (atomic_dec_return((v)) == 0)
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       EIEIO_ON_SMP
-"1:    lwarx   %0,0,%1         # atomic_dec_if_positive\n\
-       addic.  %0,%0,-1\n\
-       blt-    2f\n\
-       stwcx.  %0,0,%1\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       "\n\
-2:"    : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
-}
-
-#define smp_mb__before_atomic_dec()     smp_mb()
-#define smp_mb__after_atomic_dec()      smp_mb()
-#define smp_mb__before_atomic_inc()     smp_mb()
-#define smp_mb__after_atomic_inc()      smp_mb()
-
-#endif /* _ASM_PPC64_ATOMIC_H_ */
index a0f8312..dbfa42e 100644 (file)
@@ -42,7 +42,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * clear_bit doesn't imply a memory barrier
index 67aef0c..71cce36 100644 (file)
@@ -15,6 +15,7 @@ extern int boot_text_mapped;
 extern int btext_initialize(struct device_node *np);
 
 extern void map_boot_text(void);
+extern void init_boot_display(void);
 extern void btext_update_display(unsigned long phys, int width, int height,
                                 int depth, int pitch);
 
diff --git a/include/asm-ppc64/cputable.h b/include/asm-ppc64/cputable.h
deleted file mode 100644 (file)
index acc9b4d..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *  include/asm-ppc64/cputable.h
- *
- *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  Modifications for ppc64:
- *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
- * 
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#ifndef __ASM_PPC_CPUTABLE_H
-#define __ASM_PPC_CPUTABLE_H
-
-#include <linux/config.h>
-#include <asm/page.h> /* for ASM_CONST */
-
-/* Exposed to userland CPU features - Must match ppc32 definitions */
-#define PPC_FEATURE_32                 0x80000000
-#define PPC_FEATURE_64                 0x40000000
-#define PPC_FEATURE_601_INSTR          0x20000000
-#define PPC_FEATURE_HAS_ALTIVEC                0x10000000
-#define PPC_FEATURE_HAS_FPU            0x08000000
-#define PPC_FEATURE_HAS_MMU            0x04000000
-#define PPC_FEATURE_HAS_4xxMAC         0x02000000
-#define PPC_FEATURE_UNIFIED_CACHE      0x01000000
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/* This structure can grow, it's real size is used by head.S code
- * via the mkdefs mechanism.
- */
-struct cpu_spec;
-struct op_ppc64_model;
-
-typedef        void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
-
-struct cpu_spec {
-       /* CPU is matched via (PVR & pvr_mask) == pvr_value */
-       unsigned int    pvr_mask;
-       unsigned int    pvr_value;
-
-       char            *cpu_name;
-       unsigned long   cpu_features;           /* Kernel features */
-       unsigned int    cpu_user_features;      /* Userland features */
-
-       /* cache line sizes */
-       unsigned int    icache_bsize;
-       unsigned int    dcache_bsize;
-
-       /* number of performance monitor counters */
-       unsigned int    num_pmcs;
-
-       /* this is called to initialize various CPU bits like L1 cache,
-        * BHT, SPD, etc... from head.S before branching to identify_machine
-        */
-       cpu_setup_t     cpu_setup;
-
-       /* Used by oprofile userspace to select the right counters */
-       char            *oprofile_cpu_type;
-
-       /* Processor specific oprofile operations */
-       struct op_ppc64_model *oprofile_model;
-};
-
-extern struct cpu_spec         cpu_specs[];
-extern struct cpu_spec         *cur_cpu_spec;
-
-static inline unsigned long cpu_has_feature(unsigned long feature)
-{
-       return cur_cpu_spec->cpu_features & feature;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/* CPU kernel features */
-
-/* Retain the 32b definitions for the time being - use bottom half of word */
-#define CPU_FTR_SPLIT_ID_CACHE         ASM_CONST(0x0000000000000001)
-#define CPU_FTR_L2CR                   ASM_CONST(0x0000000000000002)
-#define CPU_FTR_SPEC7450               ASM_CONST(0x0000000000000004)
-#define CPU_FTR_ALTIVEC                        ASM_CONST(0x0000000000000008)
-#define CPU_FTR_TAU                    ASM_CONST(0x0000000000000010)
-#define CPU_FTR_CAN_DOZE               ASM_CONST(0x0000000000000020)
-#define CPU_FTR_USE_TB                 ASM_CONST(0x0000000000000040)
-#define CPU_FTR_604_PERF_MON           ASM_CONST(0x0000000000000080)
-#define CPU_FTR_601                    ASM_CONST(0x0000000000000100)
-#define CPU_FTR_HPTE_TABLE             ASM_CONST(0x0000000000000200)
-#define CPU_FTR_CAN_NAP                        ASM_CONST(0x0000000000000400)
-#define CPU_FTR_L3CR                   ASM_CONST(0x0000000000000800)
-#define CPU_FTR_L3_DISABLE_NAP         ASM_CONST(0x0000000000001000)
-#define CPU_FTR_NAP_DISABLE_L2_PR      ASM_CONST(0x0000000000002000)
-#define CPU_FTR_DUAL_PLL_750FX         ASM_CONST(0x0000000000004000)
-
-/* Add the 64b processor unique features in the top half of the word */
-#define CPU_FTR_SLB                    ASM_CONST(0x0000000100000000)
-#define CPU_FTR_16M_PAGE               ASM_CONST(0x0000000200000000)
-#define CPU_FTR_TLBIEL                         ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE              ASM_CONST(0x0000000800000000)
-#define CPU_FTR_NODSISRALIGN           ASM_CONST(0x0000001000000000)
-#define CPU_FTR_IABR                   ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA                          ASM_CONST(0x0000004000000000)
-/* unused                              ASM_CONST(0x0000008000000000) */
-#define CPU_FTR_SMT                    ASM_CONST(0x0000010000000000)
-#define CPU_FTR_COHERENT_ICACHE        ASM_CONST(0x0000020000000000)
-#define CPU_FTR_LOCKLESS_TLBIE         ASM_CONST(0x0000040000000000)
-#define CPU_FTR_MMCRA_SIHV             ASM_CONST(0x0000080000000000)
-#define CPU_FTR_CTRL                   ASM_CONST(0x0000100000000000)
-
-#ifndef __ASSEMBLY__
-
-#define COMMON_USER_PPC64      (PPC_FEATURE_32 | PPC_FEATURE_64 | \
-                                PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
-
-#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
-                                 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
-                                 CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
-
-/* iSeries doesn't support large pages */
-#ifdef CONFIG_PPC_ISERIES
-#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_PPCAS_ARCH_V2_BASE)
-#else
-#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
-#endif /* CONFIG_PPC_ISERIES */
-
-#endif /* __ASSEMBLY */
-
-#ifdef __ASSEMBLY__
-
-#define BEGIN_FTR_SECTION              98:
-
-#define END_FTR_SECTION(msk, val)              \
-99:                                            \
-       .section __ftr_fixup,"a";               \
-       .align 3;                               \
-       .llong msk;                             \
-       .llong val;                             \
-       .llong 98b;                             \
-       .llong 99b;                             \
-       .previous
-
-#else
-
-#define BEGIN_FTR_SECTION              "98:\n"
-#define END_FTR_SECTION(msk, val)              \
-"99:\n"                                                \
-"      .section __ftr_fixup,\"a\";\n"          \
-"      .align 3;\n"                            \
-"      .llong "#msk";\n"                       \
-"      .llong "#val";\n"                       \
-"      .llong 98b;\n"                          \
-"      .llong 99b;\n"                          \
-"      .previous\n"
-
-#endif /* __ASSEMBLY__ */
-
-#define END_FTR_SECTION_IFSET(msk)     END_FTR_SECTION((msk), (msk))
-#define END_FTR_SECTION_IFCLR(msk)     END_FTR_SECTION((msk), 0)
-
-#endif /* __ASM_PPC_CPUTABLE_H */
-#endif /* __KERNEL__ */
-
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
new file mode 100644 (file)
index 0000000..cdf8a2d
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_DART_H
+#define _ASM_DART_H
+
+
+/* physical base of DART registers */
+#define DART_BASE        0xf8033000UL
+
+/* Offset from base to control register */
+#define DARTCNTL   0
+/* Offset from base to exception register */
+#define DARTEXCP   0x10
+/* Offset from base to TLB tag registers */
+#define DARTTAG    0x1000
+
+
+/* Control Register fields */
+
+/* base address of table (pfn) */
+#define DARTCNTL_BASE_MASK    0xfffff
+#define DARTCNTL_BASE_SHIFT   12
+
+#define DARTCNTL_FLUSHTLB     0x400
+#define DARTCNTL_ENABLE       0x200
+
+/* size of table in pages */
+#define DARTCNTL_SIZE_MASK    0x1ff
+#define DARTCNTL_SIZE_SHIFT   0
+
+
+/* DART table fields */
+
+#define DARTMAP_VALID   0x80000000
+#define DARTMAP_RPNMASK 0x00ffffff
+
+
+#define DART_PAGE_SHIFT                12
+#define DART_PAGE_SIZE         (1 << DART_PAGE_SHIFT)
+#define DART_PAGE_FACTOR       (PAGE_SHIFT - DART_PAGE_SHIFT)
+
+
+#endif
diff --git a/include/asm-ppc64/dbdma.h b/include/asm-ppc64/dbdma.h
deleted file mode 100644 (file)
index f2d5d5d..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/dbdma.h>
-
diff --git a/include/asm-ppc64/dma.h b/include/asm-ppc64/dma.h
deleted file mode 100644 (file)
index dfd1f69..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-/* 
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- * Changes for ppc sound by Christoph Nadig
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
-
-#include <linux/config.h>
-#include <asm/io.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifndef MAX_DMA_CHANNELS
-#define MAX_DMA_CHANNELS       8
-#endif
-
-/* The maximum address that we can perform a DMA transfer to on this platform */
-/* Doesn't really apply... */
-#define MAX_DMA_ADDRESS  (~0UL)
-
-#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
-
-#define dma_outb       outb
-#define dma_inb                inb
-
-/*
- * NOTES about DMA transfers:
- *
- *  controller 1: channels 0-3, byte operations, ports 00-1F
- *  controller 2: channels 4-7, word operations, ports C0-DF
- *
- *  - ALL registers are 8 bits only, regardless of transfer size
- *  - channel 4 is not used - cascades 1 into 2.
- *  - channels 0-3 are byte - addresses/counts are for physical bytes
- *  - channels 5-7 are word - addresses/counts are for physical words
- *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- *  - transfer count loaded to registers is 1 less than actual count
- *  - controller 2 offsets are all even (2x offsets for controller 1)
- *  - page registers for 5-7 don't use data bit 0, represent 128K pages
- *  - page registers for 0-3 use bit 0, represent 64K pages
- *
- * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.  
- * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- *  Address mapping for channels 0-3:
- *
- *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *   P7  ...  P0  A7 ... A0  A7 ... A0   
- * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
- *
- *  Address mapping for channels 5-7:
- *
- *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
- *    |  ...  |   \   \   ... \  \  \  ... \  \
- *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
- *    |  ...  |     \   \   ... \  \  \  ... \
- *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0   
- * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
- *
- */
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE   0x00    /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE   0xC0    /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG           0x08    /* command register (w) */
-#define DMA1_STAT_REG          0x08    /* status register (r) */
-#define DMA1_REQ_REG            0x09    /* request register (w) */
-#define DMA1_MASK_REG          0x0A    /* single-channel mask (w) */
-#define DMA1_MODE_REG          0x0B    /* mode register (w) */
-#define DMA1_CLEAR_FF_REG      0x0C    /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
-#define DMA1_RESET_REG         0x0D    /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
-#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
-
-#define DMA2_CMD_REG           0xD0    /* command register (w) */
-#define DMA2_STAT_REG          0xD0    /* status register (r) */
-#define DMA2_REQ_REG            0xD2    /* request register (w) */
-#define DMA2_MASK_REG          0xD4    /* single-channel mask (w) */
-#define DMA2_MODE_REG          0xD6    /* mode register (w) */
-#define DMA2_CLEAR_FF_REG      0xD8    /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
-#define DMA2_RESET_REG         0xDA    /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
-#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
-
-#define DMA_ADDR_0              0x00    /* DMA address registers */
-#define DMA_ADDR_1              0x02
-#define DMA_ADDR_2              0x04
-#define DMA_ADDR_3              0x06
-#define DMA_ADDR_4              0xC0
-#define DMA_ADDR_5              0xC4
-#define DMA_ADDR_6              0xC8
-#define DMA_ADDR_7              0xCC
-
-#define DMA_CNT_0               0x01    /* DMA count registers */
-#define DMA_CNT_1               0x03
-#define DMA_CNT_2               0x05
-#define DMA_CNT_3               0x07
-#define DMA_CNT_4               0xC2
-#define DMA_CNT_5               0xC6
-#define DMA_CNT_6               0xCA
-#define DMA_CNT_7               0xCE
-
-#define DMA_LO_PAGE_0              0x87    /* DMA page registers */
-#define DMA_LO_PAGE_1              0x83
-#define DMA_LO_PAGE_2              0x81
-#define DMA_LO_PAGE_3              0x82
-#define DMA_LO_PAGE_5              0x8B
-#define DMA_LO_PAGE_6              0x89
-#define DMA_LO_PAGE_7              0x8A
-
-#define DMA_HI_PAGE_0              0x487    /* DMA page registers */
-#define DMA_HI_PAGE_1              0x483
-#define DMA_HI_PAGE_2              0x481
-#define DMA_HI_PAGE_3              0x482
-#define DMA_HI_PAGE_5              0x48B
-#define DMA_HI_PAGE_6              0x489
-#define DMA_HI_PAGE_7              0x48A
-
-#define DMA1_EXT_REG               0x40B
-#define DMA2_EXT_REG               0x4D6
-
-#define DMA_MODE_READ  0x44    /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48    /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT            0x10
-
-extern spinlock_t  dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&dma_spin_lock, flags);
-       return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
-       spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-       unsigned char ucDmaCmd=0x00;
-
-       if (dmanr != 4)
-       {
-               dma_outb(0, DMA2_MASK_REG);  /* This may not be enabled */
-               dma_outb(ucDmaCmd, DMA2_CMD_REG);  /* Enable group */
-       }
-       if (dmanr<=3)
-       {
-               dma_outb(dmanr,  DMA1_MASK_REG);
-               dma_outb(ucDmaCmd, DMA1_CMD_REG);  /* Enable group */
-       } else
-       {
-               dma_outb(dmanr & 3,  DMA2_MASK_REG);
-       }
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-       if (dmanr<=3)
-               dma_outb(dmanr | 4,  DMA1_MASK_REG);
-       else
-               dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-       if (dmanr<=3)
-               dma_outb(0,  DMA1_CLEAR_FF_REG);
-       else
-               dma_outb(0,  DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-       if (dmanr<=3)
-               dma_outb(mode | dmanr,  DMA1_MODE_REG);
-       else
-               dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
-}
-
-/* Set only the page register bits of the transfer address.
- * This is used for successive transfers when we know the contents of
- * the lower 16 bits of the DMA current address register, but a 64k boundary
- * may have been crossed.
- */
-static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
-{
-       switch(dmanr) {
-               case 0:
-                       dma_outb(pagenr, DMA_LO_PAGE_0);
-                        dma_outb(pagenr>>8, DMA_HI_PAGE_0);
-                       break;
-               case 1:
-                       dma_outb(pagenr, DMA_LO_PAGE_1);
-                        dma_outb(pagenr>>8, DMA_HI_PAGE_1);
-                       break;
-               case 2:
-                       dma_outb(pagenr, DMA_LO_PAGE_2);
-                       dma_outb(pagenr>>8, DMA_HI_PAGE_2); 
-                       break;
-               case 3:
-                       dma_outb(pagenr, DMA_LO_PAGE_3);
-                       dma_outb(pagenr>>8, DMA_HI_PAGE_3); 
-                       break;
-               case 5:
-                       dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
-                        dma_outb(pagenr>>8, DMA_HI_PAGE_5);
-                       break;
-               case 6:
-                       dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
-                       dma_outb(pagenr>>8, DMA_HI_PAGE_6);
-                       break;
-               case 7:
-                       dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
-                       dma_outb(pagenr>>8, DMA_HI_PAGE_7);
-                 break;
-       }
-}
-
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
-{
-       if (dmanr <= 3)  {
-           dma_outb( phys & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-            dma_outb( (phys>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-       }  else  {
-           dma_outb( (phys>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
-           dma_outb( (phys>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
-       }
-       set_dma_page(dmanr, phys>>16);
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-        count--;
-       if (dmanr <= 3)  {
-           dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-           dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-        } else {
-           dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-           dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-        }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-       unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
-                                        : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
-       /* using short to get 16-bit wrap around */
-       unsigned short count;
-
-       count = 1 + dma_inb(io_port);
-       count += dma_inb(io_port) << 8;
-       
-       return (dmanr <= 3)? count : (count<<1);
-}
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id);    /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);      /* release it again */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;                                        
-#else                                                         
-#define isa_dma_bridge_buggy   (0)
-#endif
-#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
-#endif /* _ASM_DMA_H */
index cb2640b..266b460 100644 (file)
@@ -5,7 +5,7 @@
 
 #include <linux/futex.h>
 #include <asm/errno.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/uaccess.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-ppc64/hardirq.h b/include/asm-ppc64/hardirq.h
deleted file mode 100644 (file)
index 4ee72bb..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/cache.h>
-#include <linux/preempt.h>
-
-typedef struct {
-       unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-static inline void ack_bad_irq(int irq)
-{
-       printk(KERN_CRIT "illegal vector %d received!\n", irq);
-       BUG();
-}
-
-#endif /* __ASM_HARDIRQ_H */
index 59c958a..bd7c953 100644 (file)
@@ -15,7 +15,7 @@
 #ifdef CONFIG_PPC_ISERIES 
 #include <asm/iSeries/iSeries_io.h>
 #endif  
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/delay.h>
 
 #include <asm-generic/iomap.h>
diff --git a/include/asm-ppc64/irq.h b/include/asm-ppc64/irq.h
deleted file mode 100644 (file)
index 99782af..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-
-/*
- * Maximum number of interrupt sources that we can handle.
- */
-#define NR_IRQS                512
-
-/* this number is used when no interrupt has been assigned */
-#define NO_IRQ                 (-1)
-
-/*
- * These constants are used for passing information about interrupt
- * signal polarity and level/edge sensing to the low-level PIC chip
- * drivers.
- */
-#define IRQ_SENSE_MASK         0x1
-#define IRQ_SENSE_LEVEL                0x1     /* interrupt on active level */
-#define IRQ_SENSE_EDGE         0x0     /* interrupt triggered by edge */
-
-#define IRQ_POLARITY_MASK      0x2
-#define IRQ_POLARITY_POSITIVE  0x2     /* high level or low->high edge */
-#define IRQ_POLARITY_NEGATIVE  0x0     /* low level or high->low edge */
-
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
-#define get_irq_desc(irq) (&irq_desc[(irq)])
-
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-       for ((i) = 0; (i) < NR_IRQS; ++(i))
-
-/* Interrupt numbers are virtual in case they are sparsely
- * distributed by the hardware.
- */
-extern unsigned int virt_irq_to_real_map[NR_IRQS];
-
-/* Create a mapping for a real_irq if it doesn't already exist.
- * Return the virtual irq as a convenience.
- */
-int virt_irq_create_mapping(unsigned int real_irq);
-void virt_irq_init(void);
-
-static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
-{
-       return virt_irq_to_real_map[virt_irq];
-}
-
-extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
-
-/*
- * Because many systems have two overlapping names spaces for
- * interrupts (ISA and XICS for example), and the ISA interrupts
- * have historically not been easy to renumber, we allow ISA
- * interrupts to take values 0 - 15, and shift up the remaining
- * interrupts by 0x10.
- */
-#define NUM_ISA_INTERRUPTS     0x10
-extern int __irq_offset_value;
-
-static inline int irq_offset_up(int irq)
-{
-       return(irq + __irq_offset_value);
-}
-
-static inline int irq_offset_down(int irq)
-{
-       return(irq - __irq_offset_value);
-}
-
-static inline int irq_offset_value(void)
-{
-       return __irq_offset_value;
-}
-
-static __inline__ int irq_canonicalize(int irq)
-{
-       return irq;
-}
-
-extern int distribute_irqs;
-
-struct irqaction;
-struct pt_regs;
-
-#ifdef CONFIG_IRQSTACKS
-/*
- * Per-cpu stacks for handling hard and soft interrupts.
- */
-extern struct thread_info *hardirq_ctx[NR_CPUS];
-extern struct thread_info *softirq_ctx[NR_CPUS];
-
-extern void irq_ctx_init(void);
-extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
-                       struct irqaction *action, struct thread_info *tp);
-
-#define __ARCH_HAS_DO_SOFTIRQ
-
-#else
-#define irq_ctx_init()
-
-#endif /* CONFIG_IRQSTACKS */
-
-#endif /* _ASM_IRQ_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/keylargo.h b/include/asm-ppc64/keylargo.h
deleted file mode 100644 (file)
index 4d78e3d..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/keylargo.h>
-
diff --git a/include/asm-ppc64/kmap_types.h b/include/asm-ppc64/kmap_types.h
deleted file mode 100644 (file)
index fd15746..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,    
-       KM_TYPE_NR
-};
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/macio.h b/include/asm-ppc64/macio.h
deleted file mode 100644 (file)
index a3028b3..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/macio.h>
-
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
deleted file mode 100644 (file)
index af53ffb..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _ASM_PPC64_MEMORY_H_ 
-#define _ASM_PPC64_MEMORY_H_ 
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
-#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP   "eieio\n"
-#define ISYNC_ON_SMP   "\n\tisync"
-#define SYNC_ON_SMP    "lwsync\n\t"
-#else
-#define EIEIO_ON_SMP
-#define ISYNC_ON_SMP
-#define SYNC_ON_SMP
-#endif
-
-static inline void eieio(void)
-{
-       __asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-       __asm__ __volatile__ ("isync" : : : "memory");
-}
-
-#ifdef CONFIG_SMP
-#define eieio_on_smp() eieio()
-#define isync_on_smp() isync()
-#else
-#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
-#define isync_on_smp() __asm__ __volatile__("": : :"memory")
-#endif
-
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low()    asm volatile("or 31,31,31   # very low priority")
-#define HMT_low()      asm volatile("or 1,1,1          # low priority")
-#define HMT_medium_low()  asm volatile("or 6,6,6      # medium low priority")
-#define HMT_medium()   asm volatile("or 2,2,2          # medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
-#define HMT_high()     asm volatile("or 3,3,3          # high priority")
-
-#define HMT_VERY_LOW    "\tor   31,31,31        # very low priority\n"
-#define HMT_LOW                "\tor   1,1,1           # low priority\n"
-#define HMT_MEDIUM_LOW  "\tor   6,6,6           # medium low priority\n"
-#define HMT_MEDIUM     "\tor   2,2,2           # medium priority\n"
-#define HMT_MEDIUM_HIGH "\tor   5,5,5           # medium high priority\n"
-#define HMT_HIGH       "\tor   3,3,3           # high priority\n"
-
-#endif
index 7bc42eb..e0505ac 100644 (file)
@@ -14,6 +14,7 @@
 #define _PPC64_MMU_H_
 
 #include <linux/config.h>
+#include <asm/ppc_asm.h> /* for ASM_CONST */
 #include <asm/page.h>
 
 /*
@@ -29,7 +30,7 @@
 
 /* Location of cpu0's segment table */
 #define STAB0_PAGE     0x6
-#define STAB0_PHYS_ADDR        (STAB0_PAGE<<PAGE_SHIFT)
+#define STAB0_PHYS_ADDR        (STAB0_PAGE<<12)
 
 #ifndef __ASSEMBLY__
 extern char initial_stab[];
@@ -205,6 +206,10 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
                               unsigned long prpn,
                               unsigned long vflags, unsigned long rflags);
 
+extern long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
+               unsigned long va, unsigned long prpn,
+               unsigned long vflags, unsigned long rflags);
+
 extern void stabs_alloc(void);
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/of_device.h b/include/asm-ppc64/of_device.h
deleted file mode 100644 (file)
index 7bc136e..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/of_device.h>
-
index a15422b..d404431 100644 (file)
  */
 
 #include <linux/config.h>
-
-#ifdef __ASSEMBLY__
-  #define ASM_CONST(x) x
-#else
-  #define __ASM_CONST(x) x##UL
-  #define ASM_CONST(x) __ASM_CONST(x)
-#endif
+#include <asm/ppc_asm.h> /* for ASM_CONST */
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT     12
index d899138..60cf8c8 100644 (file)
@@ -2,7 +2,9 @@
 #ifndef _ASM_PCI_BRIDGE_H
 #define _ASM_PCI_BRIDGE_H
 
+#include <linux/config.h>
 #include <linux/pci.h>
+#include <linux/list.h>
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -34,7 +36,7 @@ struct pci_controller {
 
        struct pci_ops *ops;
        volatile unsigned int __iomem *cfg_addr;
-       volatile unsigned char __iomem *cfg_data;
+       volatile void __iomem *cfg_data;
 
        /* Currently, we limit ourselves to 1 IO range and 3 mem
         * ranges since the common pci_bus structure can't handle more
@@ -71,6 +73,12 @@ struct pci_dn {
        struct  iommu_table *iommu_table;       /* for phb's or bridges */
        struct  pci_dev *pcidev;        /* back-pointer to the pci device */
        struct  device_node *node;      /* back-pointer to the device_node */
+#ifdef CONFIG_PPC_ISERIES
+       struct  list_head Device_List;
+       int             Irq;            /* Assigned IRQ */
+       int             Flags;          /* Possible flags(disable/bist)*/
+       u8              LogicalSlot;    /* Hv Slot Index for Tces */
+#endif
        u32     config_space[16];       /* saved PCI config space */
 };
 
@@ -96,6 +104,16 @@ static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
        return fetch_dev_dn(dev);
 }
 
+static inline int pci_device_from_OF_node(struct device_node *np,
+                                         u8 *bus, u8 *devfn)
+{
+       if (!PCI_DN(np))
+               return -ENODEV;
+       *bus = PCI_DN(np)->busno;
+       *devfn = PCI_DN(np)->devfn;
+       return 0;
+}
+
 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
 {
        if (bus->self)
@@ -105,7 +123,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
 }
 
 extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
-                                        struct device_node *dev);
+                                        struct device_node *dev, int primary);
 
 extern int pcibios_remove_root_bus(struct pci_controller *phb);
 
diff --git a/include/asm-ppc64/pmac_feature.h b/include/asm-ppc64/pmac_feature.h
deleted file mode 100644 (file)
index e07e36c..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/pmac_feature.h>
-
diff --git a/include/asm-ppc64/pmac_low_i2c.h b/include/asm-ppc64/pmac_low_i2c.h
deleted file mode 100644 (file)
index 7bcfc72..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/pmac_low_i2c.h>
-
index 6b44a8c..3945a55 100644 (file)
@@ -70,18 +70,18 @@ typedef struct compat_siginfo {
 #define __old_sigaction32      old_sigaction32
 
 struct __old_sigaction32 {
-       unsigned                sa_handler;
+       compat_uptr_t           sa_handler;
        compat_old_sigset_t     sa_mask;
        unsigned int            sa_flags;
-       unsigned                sa_restorer;     /* not used by Linux/SPARC yet */
+       compat_uptr_t           sa_restorer;     /* not used by Linux/SPARC yet */
 };
 
 
 
 struct sigaction32 {
-       unsigned int  sa_handler;       /* Really a pointer, but need to deal with 32 bits */
+       compat_uptr_t  sa_handler;      /* Really a pointer, but need to deal with 32 bits */
        unsigned int sa_flags;
-       unsigned int sa_restorer;       /* Another 32 bit pointer */
+       compat_uptr_t sa_restorer;      /* Another 32 bit pointer */
        compat_sigset_t sa_mask;                /* A 32 bit mask */
 };
 
@@ -94,9 +94,9 @@ typedef struct sigaltstack_32 {
 struct sigcontext32 {
        unsigned int    _unused[4];
        int             signal;
-       unsigned int    handler;
+       compat_uptr_t   handler;
        unsigned int    oldmask;
-       u32 regs;  /* 4 byte pointer to the pt_regs32 structure. */
+       compat_uptr_t   regs;  /* 4 byte pointer to the pt_regs32 structure. */
 };
 
 struct mcontext32 {
@@ -111,7 +111,7 @@ struct ucontext32 {
        unsigned int            uc_link;
        stack_32_t              uc_stack;
        int                     uc_pad[7];
-       u32                     uc_regs;        /* points to uc_mcontext field */
+       compat_uptr_t           uc_regs;        /* points to uc_mcontext field */
        compat_sigset_t         uc_sigmask;     /* mask last for extensibility */
        /* glibc has 1024-bit signal masks, ours are 64-bit */
        int                     uc_maskext[30];
diff --git a/include/asm-ppc64/ppc_asm.h b/include/asm-ppc64/ppc_asm.h
deleted file mode 100644 (file)
index 9031d8a..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * arch/ppc64/kernel/ppc_asm.h
- *
- * Definitions used by various bits of low-level assembly code on PowerPC.
- *
- * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC64_PPC_ASM_H
-#define _PPC64_PPC_ASM_H
-/*
- * Macros for storing registers into and loading registers from
- * exception frames.
- */
-#define SAVE_GPR(n, base)      std     n,GPR0+8*(n)(base)
-#define SAVE_2GPRS(n, base)    SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base)    SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base)    SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base)   SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_GPR(n, base)      ld      n,GPR0+8*(n)(base)
-#define REST_2GPRS(n, base)    REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base)    REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base)    REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base)   REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-
-#define SAVE_NVGPRS(base)      SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base)      REST_8GPRS(14, base); REST_10GPRS(22, base)
-
-#define SAVE_FPR(n, base)      stfd    n,THREAD_FPR0+8*(n)(base)
-#define SAVE_2FPRS(n, base)    SAVE_FPR(n, base); SAVE_FPR(n+1, base)
-#define SAVE_4FPRS(n, base)    SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
-#define SAVE_8FPRS(n, base)    SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
-#define SAVE_16FPRS(n, base)   SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
-#define SAVE_32FPRS(n, base)   SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base)      lfd     n,THREAD_FPR0+8*(n)(base)
-#define REST_2FPRS(n, base)    REST_FPR(n, base); REST_FPR(n+1, base)
-#define REST_4FPRS(n, base)    REST_2FPRS(n, base); REST_2FPRS(n+2, base)
-#define REST_8FPRS(n, base)    REST_4FPRS(n, base); REST_4FPRS(n+4, base)
-#define REST_16FPRS(n, base)   REST_8FPRS(n, base); REST_8FPRS(n+8, base)
-#define REST_32FPRS(n, base)   REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-
-#define SAVE_VR(n,b,base)      li b,THREAD_VR0+(16*(n));  stvx n,b,base
-#define SAVE_2VRS(n,b,base)    SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
-#define SAVE_4VRS(n,b,base)    SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
-#define SAVE_8VRS(n,b,base)    SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
-#define SAVE_16VRS(n,b,base)   SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
-#define SAVE_32VRS(n,b,base)   SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)      li b,THREAD_VR0+(16*(n)); lvx n,b,base
-#define REST_2VRS(n,b,base)    REST_VR(n,b,base); REST_VR(n+1,b,base)
-#define REST_4VRS(n,b,base)    REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
-#define REST_8VRS(n,b,base)    REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
-#define REST_16VRS(n,b,base)   REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
-#define REST_32VRS(n,b,base)   REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
-
-/* Macros to adjust thread priority for Iseries hardware multithreading */
-#define HMT_LOW                or 1,1,1
-#define HMT_MEDIUM     or 2,2,2
-#define HMT_HIGH       or 3,3,3
-
-/* Insert the high 32 bits of the MSR into what will be the new
-   MSR (via SRR1 and rfid)  This preserves the MSR.SF and MSR.ISF
-   bits. */
-
-#define FIX_SRR1(ra, rb)       \
-       mr      rb,ra;          \
-       mfmsr   ra;             \
-       rldimi  ra,rb,0,32
-
-#define CLR_TOP32(r)   rlwinm  (r),(r),0,0,31  /* clear top 32 bits */
-
-/* 
- * LOADADDR( rn, name )
- *   loads the address of 'name' into 'rn'
- *
- * LOADBASE( rn, name )
- *   loads the address (less the low 16 bits) of 'name' into 'rn'
- *   suitable for base+disp addressing
- */
-#define LOADADDR(rn,name) \
-       lis     rn,name##@highest;      \
-       ori     rn,rn,name##@higher;    \
-       rldicr  rn,rn,32,31;            \
-       oris    rn,rn,name##@h;         \
-       ori     rn,rn,name##@l
-
-#define LOADBASE(rn,name) \
-       lis     rn,name@highest;        \
-       ori     rn,rn,name@higher;      \
-       rldicr  rn,rn,32,31;            \
-       oris    rn,rn,name@ha
-
-
-#define SET_REG_TO_CONST(reg, value)                   \
-       lis     reg,(((value)>>48)&0xFFFF);             \
-       ori     reg,reg,(((value)>>32)&0xFFFF);         \
-       rldicr  reg,reg,32,31;                          \
-       oris    reg,reg,(((value)>>16)&0xFFFF);         \
-       ori     reg,reg,((value)&0xFFFF);
-
-#define SET_REG_TO_LABEL(reg, label)                   \
-       lis     reg,(label)@highest;                    \
-       ori     reg,reg,(label)@higher;                 \
-       rldicr  reg,reg,32,31;                          \
-       oris    reg,reg,(label)@h;                      \
-       ori     reg,reg,(label)@l;
-
-
-/* PPPBBB - DRENG  If KERNELBASE is always 0xC0...,
- * Then we can easily do this with one asm insn. -Peter
- */
-#define tophys(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        sub     rd,rs,rd
-
-#define tovirt(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        add     rd,rs,rd
-
-/* Condition Register Bit Fields */
-
-#define        cr0     0
-#define        cr1     1
-#define        cr2     2
-#define        cr3     3
-#define        cr4     4
-#define        cr5     5
-#define        cr6     6
-#define        cr7     7
-
-
-/* General Purpose Registers (GPRs) */
-
-#define        r0      0
-#define        r1      1
-#define        r2      2
-#define        r3      3
-#define        r4      4
-#define        r5      5
-#define        r6      6
-#define        r7      7
-#define        r8      8
-#define        r9      9
-#define        r10     10
-#define        r11     11
-#define        r12     12
-#define        r13     13
-#define        r14     14
-#define        r15     15
-#define        r16     16
-#define        r17     17
-#define        r18     18
-#define        r19     19
-#define        r20     20
-#define        r21     21
-#define        r22     22
-#define        r23     23
-#define        r24     24
-#define        r25     25
-#define        r26     26
-#define        r27     27
-#define        r28     28
-#define        r29     29
-#define        r30     30
-#define        r31     31
-
-
-/* Floating Point Registers (FPRs) */
-
-#define        fr0     0
-#define        fr1     1
-#define        fr2     2
-#define        fr3     3
-#define        fr4     4
-#define        fr5     5
-#define        fr6     6
-#define        fr7     7
-#define        fr8     8
-#define        fr9     9
-#define        fr10    10
-#define        fr11    11
-#define        fr12    12
-#define        fr13    13
-#define        fr14    14
-#define        fr15    15
-#define        fr16    16
-#define        fr17    17
-#define        fr18    18
-#define        fr19    19
-#define        fr20    20
-#define        fr21    21
-#define        fr22    22
-#define        fr23    23
-#define        fr24    24
-#define        fr25    25
-#define        fr26    26
-#define        fr27    27
-#define        fr28    28
-#define        fr29    29
-#define        fr30    30
-#define        fr31    31
-
-#define        vr0     0
-#define        vr1     1
-#define        vr2     2
-#define        vr3     3
-#define        vr4     4
-#define        vr5     5
-#define        vr6     6
-#define        vr7     7
-#define        vr8     8
-#define        vr9     9
-#define        vr10    10
-#define        vr11    11
-#define        vr12    12
-#define        vr13    13
-#define        vr14    14
-#define        vr15    15
-#define        vr16    16
-#define        vr17    17
-#define        vr18    18
-#define        vr19    19
-#define        vr20    20
-#define        vr21    21
-#define        vr22    22
-#define        vr23    23
-#define        vr24    24
-#define        vr25    25
-#define        vr26    26
-#define        vr27    27
-#define        vr28    28
-#define        vr29    29
-#define        vr30    30
-#define        vr31    31
-
-#endif /* _PPC64_PPC_ASM_H */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
deleted file mode 100644 (file)
index 4146189..0000000
+++ /dev/null
@@ -1,558 +0,0 @@
-#ifndef __ASM_PPC64_PROCESSOR_H
-#define __ASM_PPC64_PROCESSOR_H
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/stringify.h>
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-#include <asm/atomic.h>
-#include <asm/ppcdebug.h>
-#include <asm/a.out.h>
-#endif
-#include <asm/ptrace.h>
-#include <asm/types.h>
-#include <asm/systemcfg.h>
-#include <asm/cputable.h>
-
-/* Machine State Register (MSR) Fields */
-#define MSR_SF_LG      63              /* Enable 64 bit mode */
-#define MSR_ISF_LG     61              /* Interrupt 64b mode valid on 630 */
-#define MSR_HV_LG      60              /* Hypervisor state */
-#define MSR_VEC_LG     25              /* Enable AltiVec */
-#define MSR_POW_LG     18              /* Enable Power Management */
-#define MSR_WE_LG      18              /* Wait State Enable */
-#define MSR_TGPR_LG    17              /* TLB Update registers in use */
-#define MSR_CE_LG      17              /* Critical Interrupt Enable */
-#define MSR_ILE_LG     16              /* Interrupt Little Endian */
-#define MSR_EE_LG      15              /* External Interrupt Enable */
-#define MSR_PR_LG      14              /* Problem State / Privilege Level */
-#define MSR_FP_LG      13              /* Floating Point enable */
-#define MSR_ME_LG      12              /* Machine Check Enable */
-#define MSR_FE0_LG     11              /* Floating Exception mode 0 */
-#define MSR_SE_LG      10              /* Single Step */
-#define MSR_BE_LG      9               /* Branch Trace */
-#define MSR_DE_LG      9               /* Debug Exception Enable */
-#define MSR_FE1_LG     8               /* Floating Exception mode 1 */
-#define MSR_IP_LG      6               /* Exception prefix 0x000/0xFFF */
-#define MSR_IR_LG      5               /* Instruction Relocate */
-#define MSR_DR_LG      4               /* Data Relocate */
-#define MSR_PE_LG      3               /* Protection Enable */
-#define MSR_PX_LG      2               /* Protection Exclusive Mode */
-#define MSR_PMM_LG     2               /* Performance monitor */
-#define MSR_RI_LG      1               /* Recoverable Exception */
-#define MSR_LE_LG      0               /* Little Endian */
-
-#ifdef __ASSEMBLY__
-#define __MASK(X)      (1<<(X))
-#else
-#define __MASK(X)      (1UL<<(X))
-#endif
-
-#define MSR_SF         __MASK(MSR_SF_LG)       /* Enable 64 bit mode */
-#define MSR_ISF                __MASK(MSR_ISF_LG)      /* Interrupt 64b mode valid on 630 */
-#define MSR_HV                 __MASK(MSR_HV_LG)       /* Hypervisor state */
-#define MSR_VEC                __MASK(MSR_VEC_LG)      /* Enable AltiVec */
-#define MSR_POW                __MASK(MSR_POW_LG)      /* Enable Power Management */
-#define MSR_WE         __MASK(MSR_WE_LG)       /* Wait State Enable */
-#define MSR_TGPR       __MASK(MSR_TGPR_LG)     /* TLB Update registers in use */
-#define MSR_CE         __MASK(MSR_CE_LG)       /* Critical Interrupt Enable */
-#define MSR_ILE                __MASK(MSR_ILE_LG)      /* Interrupt Little Endian */
-#define MSR_EE         __MASK(MSR_EE_LG)       /* External Interrupt Enable */
-#define MSR_PR         __MASK(MSR_PR_LG)       /* Problem State / Privilege Level */
-#define MSR_FP         __MASK(MSR_FP_LG)       /* Floating Point enable */
-#define MSR_ME         __MASK(MSR_ME_LG)       /* Machine Check Enable */
-#define MSR_FE0                __MASK(MSR_FE0_LG)      /* Floating Exception mode 0 */
-#define MSR_SE         __MASK(MSR_SE_LG)       /* Single Step */
-#define MSR_BE         __MASK(MSR_BE_LG)       /* Branch Trace */
-#define MSR_DE         __MASK(MSR_DE_LG)       /* Debug Exception Enable */
-#define MSR_FE1                __MASK(MSR_FE1_LG)      /* Floating Exception mode 1 */
-#define MSR_IP         __MASK(MSR_IP_LG)       /* Exception prefix 0x000/0xFFF */
-#define MSR_IR         __MASK(MSR_IR_LG)       /* Instruction Relocate */
-#define MSR_DR         __MASK(MSR_DR_LG)       /* Data Relocate */
-#define MSR_PE         __MASK(MSR_PE_LG)       /* Protection Enable */
-#define MSR_PX         __MASK(MSR_PX_LG)       /* Protection Exclusive Mode */
-#define MSR_PMM                __MASK(MSR_PMM_LG)      /* Performance monitor */
-#define MSR_RI         __MASK(MSR_RI_LG)       /* Recoverable Exception */
-#define MSR_LE         __MASK(MSR_LE_LG)       /* Little Endian */
-
-#define MSR_           MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
-#define MSR_KERNEL      MSR_ | MSR_SF | MSR_HV
-
-#define MSR_USER32     MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64     MSR_USER32 | MSR_SF
-
-/* Floating Point Status and Control Register (FPSCR) Fields */
-
-#define FPSCR_FX       0x80000000      /* FPU exception summary */
-#define FPSCR_FEX      0x40000000      /* FPU enabled exception summary */
-#define FPSCR_VX       0x20000000      /* Invalid operation summary */
-#define FPSCR_OX       0x10000000      /* Overflow exception summary */
-#define FPSCR_UX       0x08000000      /* Underflow exception summary */
-#define FPSCR_ZX       0x04000000      /* Zero-divide exception summary */
-#define FPSCR_XX       0x02000000      /* Inexact exception summary */
-#define FPSCR_VXSNAN   0x01000000      /* Invalid op for SNaN */
-#define FPSCR_VXISI    0x00800000      /* Invalid op for Inv - Inv */
-#define FPSCR_VXIDI    0x00400000      /* Invalid op for Inv / Inv */
-#define FPSCR_VXZDZ    0x00200000      /* Invalid op for Zero / Zero */
-#define FPSCR_VXIMZ    0x00100000      /* Invalid op for Inv * Zero */
-#define FPSCR_VXVC     0x00080000      /* Invalid op for Compare */
-#define FPSCR_FR       0x00040000      /* Fraction rounded */
-#define FPSCR_FI       0x00020000      /* Fraction inexact */
-#define FPSCR_FPRF     0x0001f000      /* FPU Result Flags */
-#define FPSCR_FPCC     0x0000f000      /* FPU Condition Codes */
-#define FPSCR_VXSOFT   0x00000400      /* Invalid op for software request */
-#define FPSCR_VXSQRT   0x00000200      /* Invalid op for square root */
-#define FPSCR_VXCVI    0x00000100      /* Invalid op for integer convert */
-#define FPSCR_VE       0x00000080      /* Invalid op exception enable */
-#define FPSCR_OE       0x00000040      /* IEEE overflow exception enable */
-#define FPSCR_UE       0x00000020      /* IEEE underflow exception enable */
-#define FPSCR_ZE       0x00000010      /* IEEE zero divide exception enable */
-#define FPSCR_XE       0x00000008      /* FP inexact exception enable */
-#define FPSCR_NI       0x00000004      /* FPU non IEEE-Mode */
-#define FPSCR_RN       0x00000003      /* FPU rounding control */
-
-/* Special Purpose Registers (SPRNs)*/
-
-#define        SPRN_CTR        0x009   /* Count Register */
-#define        SPRN_DABR       0x3F5   /* Data Address Breakpoint Register */
-#define   DABR_TRANSLATION     (1UL << 2)
-#define        SPRN_DAR        0x013   /* Data Address Register */
-#define        SPRN_DEC        0x016   /* Decrement Register */
-#define        SPRN_DSISR      0x012   /* Data Storage Interrupt Status Register */
-#define   DSISR_NOHPTE         0x40000000      /* no translation found */
-#define   DSISR_PROTFAULT      0x08000000      /* protection fault */
-#define   DSISR_ISSTORE                0x02000000      /* access was a store */
-#define   DSISR_DABRMATCH      0x00400000      /* hit data breakpoint */
-#define   DSISR_NOSEGMENT      0x00200000      /* STAB/SLB miss */
-#define        SPRN_HID0       0x3F0   /* Hardware Implementation Register 0 */
-#define        SPRN_MSRDORM    0x3F1   /* Hardware Implementation Register 1 */
-#define SPRN_HID1      0x3F1   /* Hardware Implementation Register 1 */
-#define        SPRN_IABR       0x3F2   /* Instruction Address Breakpoint Register */
-#define        SPRN_NIADORM    0x3F3   /* Hardware Implementation Register 2 */
-#define SPRN_HID4      0x3F4   /* 970 HID4 */
-#define SPRN_HID5      0x3F6   /* 970 HID5 */
-#define        SPRN_HID6       0x3F9   /* BE HID 6 */
-#define          HID6_LB       (0x0F<<12) /* Concurrent Large Page Modes */
-#define          HID6_DLP      (1<<20) /* Disable all large page modes (4K only) */
-#define        SPRN_TSCR       0x399   /* Thread switch control on BE */
-#define        SPRN_TTR        0x39A   /* Thread switch timeout on BE */
-#define          TSCR_DEC_ENABLE       0x200000 /* Decrementer Interrupt */
-#define          TSCR_EE_ENABLE        0x100000 /* External Interrupt */
-#define          TSCR_EE_BOOST         0x080000 /* External Interrupt Boost */
-#define        SPRN_TSC        0x3FD   /* Thread switch control on others */
-#define        SPRN_TST        0x3FC   /* Thread switch timeout on others */
-#define        SPRN_L2CR       0x3F9   /* Level 2 Cache Control Regsiter */
-#define        SPRN_LR         0x008   /* Link Register */
-#define        SPRN_PIR        0x3FF   /* Processor Identification Register */
-#define        SPRN_PIT        0x3DB   /* Programmable Interval Timer */
-#define        SPRN_PURR       0x135   /* Processor Utilization of Resources Register */
-#define        SPRN_PVR        0x11F   /* Processor Version Register */
-#define        SPRN_RPA        0x3D6   /* Required Physical Address Register */
-#define        SPRN_SDA        0x3BF   /* Sampled Data Address Register */
-#define        SPRN_SDR1       0x019   /* MMU Hash Base Register */
-#define        SPRN_SIA        0x3BB   /* Sampled Instruction Address Register */
-#define        SPRN_SPRG0      0x110   /* Special Purpose Register General 0 */
-#define        SPRN_SPRG1      0x111   /* Special Purpose Register General 1 */
-#define        SPRN_SPRG2      0x112   /* Special Purpose Register General 2 */
-#define        SPRN_SPRG3      0x113   /* Special Purpose Register General 3 */
-#define        SPRN_SRR0       0x01A   /* Save/Restore Register 0 */
-#define        SPRN_SRR1       0x01B   /* Save/Restore Register 1 */
-#define        SPRN_TBRL       0x10C   /* Time Base Read Lower Register (user, R/O) */
-#define        SPRN_TBRU       0x10D   /* Time Base Read Upper Register (user, R/O) */
-#define        SPRN_TBWL       0x11C   /* Time Base Lower Register (super, W/O) */
-#define        SPRN_TBWU       0x11D   /* Time Base Write Upper Register (super, W/O) */
-#define SPRN_HIOR      0x137   /* 970 Hypervisor interrupt offset */
-#define        SPRN_USIA       0x3AB   /* User Sampled Instruction Address Register */
-#define        SPRN_XER        0x001   /* Fixed Point Exception Register */
-#define SPRN_VRSAVE     0x100   /* Vector save */
-#define SPRN_CTRLF     0x088
-#define SPRN_CTRLT     0x098
-#define   CTRL_RUNLATCH        0x1
-
-/* Performance monitor SPRs */
-#define SPRN_SIAR      780
-#define SPRN_SDAR      781
-#define SPRN_MMCRA     786
-#define   MMCRA_SIHV   0x10000000UL /* state of MSR HV when SIAR set */
-#define   MMCRA_SIPR   0x08000000UL /* state of MSR PR when SIAR set */
-#define   MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
-#define SPRN_PMC1      787
-#define SPRN_PMC2      788
-#define SPRN_PMC3      789
-#define SPRN_PMC4      790
-#define SPRN_PMC5      791
-#define SPRN_PMC6      792
-#define SPRN_PMC7      793
-#define SPRN_PMC8      794
-#define SPRN_MMCR0     795
-#define   MMCR0_FC     0x80000000UL /* freeze counters. set to 1 on a perfmon exception */
-#define   MMCR0_FCS    0x40000000UL /* freeze in supervisor state */
-#define   MMCR0_KERNEL_DISABLE MMCR0_FCS
-#define   MMCR0_FCP    0x20000000UL /* freeze in problem state */
-#define   MMCR0_PROBLEM_DISABLE MMCR0_FCP
-#define   MMCR0_FCM1   0x10000000UL /* freeze counters while MSR mark = 1 */
-#define   MMCR0_FCM0   0x08000000UL /* freeze counters while MSR mark = 0 */
-#define   MMCR0_PMXE   0x04000000UL /* performance monitor exception enable */
-#define   MMCR0_FCECE  0x02000000UL /* freeze counters on enabled condition or event */
-/* time base exception enable */
-#define   MMCR0_TBEE   0x00400000UL /* time base exception enable */
-#define   MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
-#define   MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
-#define   MMCR0_TRIGGER        0x00002000UL /* TRIGGER enable */
-#define   MMCR0_PMAO   0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
-#define   MMCR0_SHRFC  0x00000040UL /* SHRre freeze conditions between threads */
-#define   MMCR0_FCTI   0x00000008UL /* freeze counters in tags inactive mode */
-#define   MMCR0_FCTA   0x00000004UL /* freeze counters in tags active mode */
-#define   MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
-#define   MMCR0_FCHV   0x00000001UL /* freeze conditions in hypervisor mode */
-#define SPRN_MMCR1     798
-
-/* Short-hand versions for a number of the above SPRNs */
-
-#define        CTR     SPRN_CTR        /* Counter Register */
-#define        DAR     SPRN_DAR        /* Data Address Register */
-#define        DABR    SPRN_DABR       /* Data Address Breakpoint Register */
-#define        DEC     SPRN_DEC        /* Decrement Register */
-#define        DSISR   SPRN_DSISR      /* Data Storage Interrupt Status Register */
-#define        HID0    SPRN_HID0       /* Hardware Implementation Register 0 */
-#define        MSRDORM SPRN_MSRDORM    /* MSR Dormant Register */
-#define        NIADORM SPRN_NIADORM    /* NIA Dormant Register */
-#define        TSC     SPRN_TSC        /* Thread switch control */
-#define        TST     SPRN_TST        /* Thread switch timeout */
-#define        IABR    SPRN_IABR       /* Instruction Address Breakpoint Register */
-#define        L2CR    SPRN_L2CR       /* PPC 750 L2 control register */
-#define        __LR    SPRN_LR
-#define        PVR     SPRN_PVR        /* Processor Version */
-#define        PIR     SPRN_PIR        /* Processor ID */
-#define        PURR    SPRN_PURR       /* Processor Utilization of Resource Register */
-#define        SDR1    SPRN_SDR1       /* MMU hash base register */
-#define        SPR0    SPRN_SPRG0      /* Supervisor Private Registers */
-#define        SPR1    SPRN_SPRG1
-#define        SPR2    SPRN_SPRG2
-#define        SPR3    SPRN_SPRG3
-#define        SPRG0   SPRN_SPRG0
-#define        SPRG1   SPRN_SPRG1
-#define        SPRG2   SPRN_SPRG2
-#define        SPRG3   SPRN_SPRG3
-#define        SRR0    SPRN_SRR0       /* Save and Restore Register 0 */
-#define        SRR1    SPRN_SRR1       /* Save and Restore Register 1 */
-#define        TBRL    SPRN_TBRL       /* Time Base Read Lower Register */
-#define        TBRU    SPRN_TBRU       /* Time Base Read Upper Register */
-#define        TBWL    SPRN_TBWL       /* Time Base Write Lower Register */
-#define        TBWU    SPRN_TBWU       /* Time Base Write Upper Register */
-#define        XER     SPRN_XER
-
-/* Processor Version Register (PVR) field extraction */
-
-#define        PVR_VER(pvr)  (((pvr) >>  16) & 0xFFFF) /* Version field */
-#define        PVR_REV(pvr)  (((pvr) >>   0) & 0xFFFF) /* Revison field */
-
-/* Processor Version Numbers */
-#define        PV_NORTHSTAR    0x0033
-#define        PV_PULSAR       0x0034
-#define        PV_POWER4       0x0035
-#define        PV_ICESTAR      0x0036
-#define        PV_SSTAR        0x0037
-#define        PV_POWER4p      0x0038
-#define PV_970         0x0039
-#define        PV_POWER5       0x003A
-#define PV_POWER5p     0x003B
-#define PV_970FX       0x003C
-#define        PV_630          0x0040
-#define        PV_630p         0x0041
-#define        PV_970MP        0x0044
-#define        PV_BE           0x0070
-
-/* Platforms supported by PPC64 */
-#define PLATFORM_PSERIES      0x0100
-#define PLATFORM_PSERIES_LPAR 0x0101
-#define PLATFORM_ISERIES_LPAR 0x0201
-#define PLATFORM_LPAR         0x0001
-#define PLATFORM_POWERMAC     0x0400
-#define PLATFORM_MAPLE        0x0500
-#define PLATFORM_BPA          0x1000
-
-/* Compatibility with drivers coming from PPC32 world */
-#define _machine       (systemcfg->platform)
-#define _MACH_Pmac     PLATFORM_POWERMAC
-
-/*
- * List of interrupt controllers.
- */
-#define IC_INVALID    0
-#define IC_OPEN_PIC   1
-#define IC_PPC_XIC    2
-#define IC_BPA_IIC    3
-
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
-
-#ifdef __ASSEMBLY__
-
-#define _GLOBAL(name) \
-       .section ".text"; \
-       .align 2 ; \
-       .globl name; \
-       .globl GLUE(.,name); \
-       .section ".opd","aw"; \
-name: \
-       .quad GLUE(.,name); \
-       .quad .TOC.@tocbase; \
-       .quad 0; \
-       .previous; \
-       .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#define _KPROBE(name) \
-       .section ".kprobes.text","a"; \
-       .align 2 ; \
-       .globl name; \
-       .globl GLUE(.,name); \
-       .section ".opd","aw"; \
-name: \
-       .quad GLUE(.,name); \
-       .quad .TOC.@tocbase; \
-       .quad 0; \
-       .previous; \
-       .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#define _STATIC(name) \
-       .section ".text"; \
-       .align 2 ; \
-       .section ".opd","aw"; \
-name: \
-       .quad GLUE(.,name); \
-       .quad .TOC.@tocbase; \
-       .quad 0; \
-       .previous; \
-       .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#else /* __ASSEMBLY__ */
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-/* Macros for setting and retrieving special purpose registers */
-
-#define mfmsr()                ({unsigned long rval; \
-                       asm volatile("mfmsr %0" : "=r" (rval)); rval;})
-
-#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
-                                    : : "r" (v))
-#define mtmsrd(v)      __mtmsrd((v), 0)
-
-#define mfspr(rn)      ({unsigned long rval; \
-                       asm volatile("mfspr %0," __stringify(rn) \
-                                    : "=r" (rval)); rval;})
-#define mtspr(rn, v)   asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
-
-#define mftb()         ({unsigned long rval;   \
-                       asm volatile("mftb %0" : "=r" (rval)); rval;})
-
-#define mttbl(v)       asm volatile("mttbl %0":: "r"(v))
-#define mttbu(v)       asm volatile("mttbu %0":: "r"(v))
-
-#define mfasr()                ({unsigned long rval; \
-                       asm volatile("mfasr %0" : "=r" (rval)); rval;})
-
-static inline void set_tb(unsigned int upper, unsigned int lower)
-{
-       mttbl(0);
-       mttbu(upper);
-       mttbl(lower);
-}
-
-#define __get_SP()     ({unsigned long sp; \
-                       asm volatile("mr %0,1": "=r" (sp)); sp;})
-
-#ifdef __KERNEL__
-
-extern int have_of;
-extern u64 ppc64_interrupt_controller;
-
-struct task_struct;
-void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
-void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/* Create a new kernel thread. */
-extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
-/* Lazy FPU handling on uni-processor */
-extern struct task_struct *last_task_used_math;
-extern struct task_struct *last_task_used_altivec;
-
-/* 64-bit user address space is 44-bits (16TB user VM) */
-#define TASK_SIZE_USER64 (0x0000100000000000UL)
-
-/* 
- * 32-bit user address space is 4GB - 1 page 
- * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
- */
-#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
-
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
-               TASK_SIZE_USER32 : TASK_SIZE_USER64)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
-
-#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
-               TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-struct thread_struct {
-       unsigned long   ksp;            /* Kernel stack pointer */
-       unsigned long   ksp_vsid;
-       struct pt_regs  *regs;          /* Pointer to saved register state */
-       mm_segment_t    fs;             /* for get_fs() validation */
-       double          fpr[32];        /* Complete floating point set */
-       unsigned long   fpscr;          /* Floating point status (plus pad) */
-       unsigned long   fpexc_mode;     /* Floating-point exception mode */
-       unsigned long   start_tb;       /* Start purr when proc switched in */
-       unsigned long   accum_tb;       /* Total accumilated purr for process */
-       unsigned long   vdso_base;      /* base of the vDSO library */
-       unsigned long   dabr;           /* Data address breakpoint register */
-#ifdef CONFIG_ALTIVEC
-       /* Complete AltiVec register set */
-       vector128       vr[32] __attribute((aligned(16)));
-       /* AltiVec status */
-       vector128       vscr __attribute((aligned(16)));
-       unsigned long   vrsave;
-       int             used_vr;        /* set if process has used altivec */
-#endif /* CONFIG_ALTIVEC */
-};
-
-#define ARCH_MIN_TASKALIGN 16
-
-#define INIT_SP                (sizeof(init_stack) + (unsigned long) &init_stack)
-
-#define INIT_THREAD  { \
-       .ksp = INIT_SP, \
-       .regs = (struct pt_regs *)INIT_SP - 1, \
-       .fs = KERNEL_DS, \
-       .fpr = {0}, \
-       .fpscr = 0, \
-       .fpexc_mode = MSR_FE0|MSR_FE1, \
-}
-
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)    \
-        ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
-unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
-
-/* Get/set floating-point exception mode */
-#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
-#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
-
-extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
-extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
-
-static inline unsigned int __unpack_fe01(unsigned long msr_bits)
-{
-       return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
-}
-
-static inline unsigned long __pack_fe01(unsigned int fpmode)
-{
-       return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
-}
-
-#define cpu_relax()    do { HMT_low(); HMT_medium(); barrier(); } while (0)
-
-/*
- * Prefetch macros.
- */
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-static inline void prefetch(const void *x)
-{
-       if (unlikely(!x))
-               return;
-
-       __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
-}
-
-static inline void prefetchw(const void *x)
-{
-       if (unlikely(!x))
-               return;
-
-       __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
-}
-
-#define spin_lock_prefetch(x)  prefetchw(x)
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
-static inline void ppc64_runlatch_on(void)
-{
-       unsigned long ctrl;
-
-       if (cpu_has_feature(CPU_FTR_CTRL)) {
-               ctrl = mfspr(SPRN_CTRLF);
-               ctrl |= CTRL_RUNLATCH;
-               mtspr(SPRN_CTRLT, ctrl);
-       }
-}
-
-static inline void ppc64_runlatch_off(void)
-{
-       unsigned long ctrl;
-
-       if (cpu_has_feature(CPU_FTR_CTRL)) {
-               ctrl = mfspr(SPRN_CTRLF);
-               ctrl &= ~CTRL_RUNLATCH;
-               mtspr(SPRN_CTRLT, ctrl);
-       }
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#define RUNLATCH_ON(REG)                       \
-BEGIN_FTR_SECTION                              \
-       mfspr   (REG),SPRN_CTRLF;               \
-       ori     (REG),(REG),CTRL_RUNLATCH;      \
-       mtspr   SPRN_CTRLT,(REG);               \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-#endif
-
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
-#endif /* __ASM_PPC64_PROCESSOR_H */
index c02ec1d..e8d0d2a 100644 (file)
@@ -14,6 +14,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#include <linux/config.h>
 #include <linux/proc_fs.h>
 #include <asm/atomic.h>
 
@@ -137,6 +138,9 @@ struct device_node {
        struct  kref kref;
        unsigned long _flags;
        void    *data;
+#ifdef CONFIG_PPC_ISERIES
+       struct list_head Device_List;
+#endif
 };
 
 extern struct device_node *of_chosen;
index d86f742..c5e9052 100644 (file)
@@ -77,7 +77,6 @@ extern int smt_enabled_at_boot;
 
 extern int smp_mpic_probe(void);
 extern void smp_mpic_setup_cpu(int cpu);
-extern void smp_mpic_message_pass(int target, int msg);
 extern void smp_generic_kick_cpu(int nr);
 
 extern void smp_generic_give_timebase(void);
index 375015c..2e17ef7 100644 (file)
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * Memory barrier.
@@ -48,7 +48,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()       mb()
 #define smp_rmb()      rmb()
-#define smp_wmb()      __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()      eieio()
 #define smp_read_barrier_depends()  read_barrier_depends()
 #else
 #define smp_mb()       __asm__ __volatile__("": : :"memory")
@@ -131,6 +131,10 @@ static inline void flush_altivec_to_thread(struct task_struct *t)
 }
 #endif
 
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+
 extern int mem_init_done;      /* set on boot once kmalloc can be called */
 
 /* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
@@ -144,12 +148,7 @@ struct thread_struct;
 extern struct task_struct * _switch(struct thread_struct *prev,
                                    struct thread_struct *next);
 
-static inline int __is_processor(unsigned long pv)
-{
-       unsigned long pvr;
-       asm("mfspr %0, 0x11F" : "=r" (pvr)); 
-       return(PVR_VER(pvr) == pv);
-}
+extern int powersave_nap;      /* set if nap mode can be used in idle loop */
 
 /*
  * Atomic exchange
diff --git a/include/asm-ppc64/tce.h b/include/asm-ppc64/tce.h
new file mode 100644 (file)
index 0000000..d40b6b4
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_TCE_H
+#define _ASM_TCE_H
+
+/*
+ * Tces come in two formats, one for the virtual bus and a different
+ * format for PCI
+ */
+#define TCE_VB  0
+#define TCE_PCI 1
+
+/* TCE page size is 4096 bytes (1 << 12) */
+
+#define TCE_SHIFT      12
+#define TCE_PAGE_SIZE  (1 << TCE_SHIFT)
+#define TCE_PAGE_FACTOR        (PAGE_SHIFT - TCE_SHIFT)
+
+
+/* tce_entry
+ * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
+ * abstracted so layout is irrelevant.
+ */
+union tce_entry {
+       unsigned long te_word;
+       struct {
+               unsigned int  tb_cacheBits :6;  /* Cache hash bits - not used */
+               unsigned int  tb_rsvd      :6;
+               unsigned long tb_rpn       :40; /* Real page number */
+               unsigned int  tb_valid     :1;  /* Tce is valid (vb only) */
+               unsigned int  tb_allio     :1;  /* Tce is valid for all lps (vb only) */
+               unsigned int  tb_lpindex   :8;  /* LpIndex for user of TCE (vb only) */
+               unsigned int  tb_pciwr     :1;  /* Write allowed (pci only) */
+               unsigned int  tb_rdwr      :1;  /* Read allowed  (pci), Write allowed (vb) */
+       } te_bits;
+#define te_cacheBits te_bits.tb_cacheBits
+#define te_rpn       te_bits.tb_rpn
+#define te_valid     te_bits.tb_valid
+#define te_allio     te_bits.tb_allio
+#define te_lpindex   te_bits.tb_lpindex
+#define te_pciwr     te_bits.tb_pciwr
+#define te_rdwr      te_bits.tb_rdwr
+};
+
+
+#endif
diff --git a/include/asm-ppc64/time.h b/include/asm-ppc64/time.h
deleted file mode 100644 (file)
index c6c762c..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Common time prototypes and such for all ppc machines.
- *
- * Written by Cort Dougan (cort@cs.nmt.edu) to merge
- * Paul Mackerras' version and mine for PReP and Pmac.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef __PPC64_TIME_H
-#define __PPC64_TIME_H
-
-#ifdef __KERNEL__
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/mc146818rtc.h>
-
-#include <asm/processor.h>
-#include <asm/paca.h>
-#include <asm/iSeries/HvCall.h>
-
-/* time.c */
-extern unsigned long tb_ticks_per_jiffy;
-extern unsigned long tb_ticks_per_usec;
-extern unsigned long tb_ticks_per_sec;
-extern unsigned long tb_to_xs;
-extern unsigned      tb_to_us;
-extern unsigned long tb_last_stamp;
-
-struct rtc_time;
-extern void to_tm(int tim, struct rtc_time * tm);
-extern time_t last_rtc_update;
-
-void generic_calibrate_decr(void);
-void setup_default_decr(void);
-
-/* Some sane defaults: 125 MHz timebase, 1GHz processor */
-extern unsigned long ppc_proc_freq;
-#define DEFAULT_PROC_FREQ      (DEFAULT_TB_FREQ * 8)
-extern unsigned long ppc_tb_freq;
-#define DEFAULT_TB_FREQ                125000000UL
-
-/*
- * By putting all of this stuff into a single struct we 
- * reduce the number of cache lines touched by do_gettimeofday.
- * Both by collecting all of the data in one cache line and
- * by touching only one TOC entry
- */
-struct gettimeofday_vars {
-       unsigned long tb_to_xs;
-       unsigned long stamp_xsec;
-       unsigned long tb_orig_stamp;
-};
-
-struct gettimeofday_struct {
-       unsigned long tb_ticks_per_sec;
-       struct gettimeofday_vars vars[2];
-       struct gettimeofday_vars * volatile varp;
-       unsigned      var_idx;
-       unsigned      tb_to_us;
-};
-
-struct div_result {
-       unsigned long result_high;
-       unsigned long result_low;
-};
-
-int via_calibrate_decr(void);
-
-static __inline__ unsigned long get_tb(void)
-{
-       return mftb();
-}
-
-/* Accessor functions for the decrementer register. */
-static __inline__ unsigned int get_dec(void)
-{
-       return (mfspr(SPRN_DEC));
-}
-
-static __inline__ void set_dec(int val)
-{
-#ifdef CONFIG_PPC_ISERIES
-       struct paca_struct *lpaca = get_paca();
-       int cur_dec;
-
-       if (lpaca->lppaca.shared_proc) {
-               lpaca->lppaca.virtual_decr = val;
-               cur_dec = get_dec();
-               if (cur_dec > val)
-                       HvCall_setVirtualDecr();
-       } else
-#endif
-               mtspr(SPRN_DEC, val);
-}
-
-static inline unsigned long tb_ticks_since(unsigned long tstamp)
-{
-       return get_tb() - tstamp;
-}
-
-#define mulhwu(x,y) \
-({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
-#define mulhdu(x,y) \
-({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
-
-
-unsigned mulhwu_scale_factor(unsigned, unsigned);
-void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
-                  unsigned divisor, struct div_result *dr );
-
-/* Used to store Processor Utilization register (purr) values */
-
-struct cpu_usage {
-        u64 current_tb;  /* Holds the current purr register values */
-};
-
-DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
-
-#endif /* __KERNEL__ */
-#endif /* __PPC64_TIME_H */
index 74271d7..626f505 100644 (file)
 struct mm_struct;
 struct ppc64_tlb_batch {
        unsigned long index;
-       unsigned long context;
        struct mm_struct *mm;
        pte_t pte[PPC64_TLB_BATCH_NR];
-       unsigned long addr[PPC64_TLB_BATCH_NR];
        unsigned long vaddr[PPC64_TLB_BATCH_NR];
        unsigned int large;
 };
@@ -48,8 +46,7 @@ static inline void flush_tlb_pending(void)
 #define flush_tlb_kernel_range(start, end)     flush_tlb_pending()
 #define flush_tlb_pgtables(mm, start, end)     do { } while (0)
 
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-                           int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
 
 #endif /* _PPC64_TLBFLUSH_H */
index c786604..8192fb8 100644 (file)
@@ -28,4 +28,7 @@ extern unsigned long udbg_ifdebug(unsigned long flags);
 extern void __init ppcdbg_initialize(void);
 
 extern void udbg_init_uart(void __iomem *comport, unsigned int speed);
+
+struct device_node;
+extern void udbg_init_scc(struct device_node *np);
 #endif
diff --git a/include/asm-ppc64/uninorth.h b/include/asm-ppc64/uninorth.h
deleted file mode 100644 (file)
index 7ad7059..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-ppc/uninorth.h>
-
diff --git a/include/asm-ppc64/unistd.h b/include/asm-ppc64/unistd.h
deleted file mode 100644 (file)
index 977bc98..0000000
+++ /dev/null
@@ -1,487 +0,0 @@
-#ifndef _ASM_PPC_UNISTD_H_
-#define _ASM_PPC_UNISTD_H_
-
-/*
- * This file contains the system call numbers.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define __NR_restart_syscall     0
-#define __NR_exit                1
-#define __NR_fork                2
-#define __NR_read                3
-#define __NR_write               4
-#define __NR_open                5
-#define __NR_close               6
-#define __NR_waitpid             7
-#define __NR_creat               8
-#define __NR_link                9
-#define __NR_unlink             10
-#define __NR_execve             11
-#define __NR_chdir              12
-#define __NR_time               13
-#define __NR_mknod              14
-#define __NR_chmod              15
-#define __NR_lchown             16
-#define __NR_break              17
-#define __NR_oldstat            18
-#define __NR_lseek              19
-#define __NR_getpid             20
-#define __NR_mount              21
-#define __NR_umount             22
-#define __NR_setuid             23
-#define __NR_getuid             24
-#define __NR_stime              25
-#define __NR_ptrace             26
-#define __NR_alarm              27
-#define __NR_oldfstat           28
-#define __NR_pause              29
-#define __NR_utime              30
-#define __NR_stty               31
-#define __NR_gtty               32
-#define __NR_access             33
-#define __NR_nice               34
-#define __NR_ftime              35
-#define __NR_sync               36
-#define __NR_kill               37
-#define __NR_rename             38
-#define __NR_mkdir              39
-#define __NR_rmdir              40
-#define __NR_dup                41
-#define __NR_pipe               42
-#define __NR_times              43
-#define __NR_prof               44
-#define __NR_brk                45
-#define __NR_setgid             46
-#define __NR_getgid             47
-#define __NR_signal             48
-#define __NR_geteuid            49
-#define __NR_getegid            50
-#define __NR_acct               51
-#define __NR_umount2            52
-#define __NR_lock               53
-#define __NR_ioctl              54
-#define __NR_fcntl              55
-#define __NR_mpx                56
-#define __NR_setpgid            57
-#define __NR_ulimit             58
-#define __NR_oldolduname        59
-#define __NR_umask              60
-#define __NR_chroot             61
-#define __NR_ustat              62
-#define __NR_dup2               63
-#define __NR_getppid            64
-#define __NR_getpgrp            65
-#define __NR_setsid             66
-#define __NR_sigaction          67
-#define __NR_sgetmask           68
-#define __NR_ssetmask           69
-#define __NR_setreuid           70
-#define __NR_setregid           71
-#define __NR_sigsuspend                 72
-#define __NR_sigpending                 73
-#define __NR_sethostname        74
-#define __NR_setrlimit          75
-#define __NR_getrlimit          76
-#define __NR_getrusage          77
-#define __NR_gettimeofday       78
-#define __NR_settimeofday       79
-#define __NR_getgroups          80
-#define __NR_setgroups          81
-#define __NR_select             82
-#define __NR_symlink            83
-#define __NR_oldlstat           84
-#define __NR_readlink           85
-#define __NR_uselib             86
-#define __NR_swapon             87
-#define __NR_reboot             88
-#define __NR_readdir            89
-#define __NR_mmap               90
-#define __NR_munmap             91
-#define __NR_truncate           92
-#define __NR_ftruncate          93
-#define __NR_fchmod             94
-#define __NR_fchown             95
-#define __NR_getpriority        96
-#define __NR_setpriority        97
-#define __NR_profil             98
-#define __NR_statfs             99
-#define __NR_fstatfs           100
-#define __NR_ioperm            101
-#define __NR_socketcall                102
-#define __NR_syslog            103
-#define __NR_setitimer         104
-#define __NR_getitimer         105
-#define __NR_stat              106
-#define __NR_lstat             107
-#define __NR_fstat             108
-#define __NR_olduname          109
-#define __NR_iopl              110
-#define __NR_vhangup           111
-#define __NR_idle              112
-#define __NR_vm86              113
-#define __NR_wait4             114
-#define __NR_swapoff           115
-#define __NR_sysinfo           116
-#define __NR_ipc               117
-#define __NR_fsync             118
-#define __NR_sigreturn         119
-#define __NR_clone             120
-#define __NR_setdomainname     121
-#define __NR_uname             122
-#define __NR_modify_ldt                123
-#define __NR_adjtimex          124
-#define __NR_mprotect          125
-#define __NR_sigprocmask       126
-#define __NR_create_module     127
-#define __NR_init_module       128
-#define __NR_delete_module     129
-#define __NR_get_kernel_syms   130
-#define __NR_quotactl          131
-#define __NR_getpgid           132
-#define __NR_fchdir            133
-#define __NR_bdflush           134
-#define __NR_sysfs             135
-#define __NR_personality       136
-#define __NR_afs_syscall       137 /* Syscall for Andrew File System */
-#define __NR_setfsuid          138
-#define __NR_setfsgid          139
-#define __NR__llseek           140
-#define __NR_getdents          141
-#define __NR__newselect                142
-#define __NR_flock             143
-#define __NR_msync             144
-#define __NR_readv             145
-#define __NR_writev            146
-#define __NR_getsid            147
-#define __NR_fdatasync         148
-#define __NR__sysctl           149
-#define __NR_mlock             150
-#define __NR_munlock           151
-#define __NR_mlockall          152
-#define __NR_munlockall                153
-#define __NR_sched_setparam            154
-#define __NR_sched_getparam            155
-#define __NR_sched_setscheduler                156
-#define __NR_sched_getscheduler                157
-#define __NR_sched_yield               158
-#define __NR_sched_get_priority_max    159
-#define __NR_sched_get_priority_min    160
-#define __NR_sched_rr_get_interval     161
-#define __NR_nanosleep         162
-#define __NR_mremap            163
-#define __NR_setresuid         164
-#define __NR_getresuid         165
-#define __NR_query_module      166
-#define __NR_poll              167
-#define __NR_nfsservctl                168
-#define __NR_setresgid         169
-#define __NR_getresgid         170
-#define __NR_prctl             171
-#define __NR_rt_sigreturn      172
-#define __NR_rt_sigaction      173
-#define __NR_rt_sigprocmask    174
-#define __NR_rt_sigpending     175
-#define __NR_rt_sigtimedwait   176
-#define __NR_rt_sigqueueinfo   177
-#define __NR_rt_sigsuspend     178
-#define __NR_pread64           179
-#define __NR_pwrite64          180
-#define __NR_chown             181
-#define __NR_getcwd            182
-#define __NR_capget            183
-#define __NR_capset            184
-#define __NR_sigaltstack       185
-#define __NR_sendfile          186
-#define __NR_getpmsg           187     /* some people actually want streams */
-#define __NR_putpmsg           188     /* some people actually want streams */
-#define __NR_vfork             189
-#define __NR_ugetrlimit                190     /* SuS compliant getrlimit */
-#define __NR_readahead         191
-/* #define __NR_mmap2          192     32bit only */
-/* #define __NR_truncate64     193     32bit only */
-/* #define __NR_ftruncate64    194     32bit only */
-/* #define __NR_stat64         195     32bit only */
-/* #define __NR_lstat64                196     32bit only */
-/* #define __NR_fstat64                197     32bit only */
-#define __NR_pciconfig_read    198
-#define __NR_pciconfig_write   199
-#define __NR_pciconfig_iobase  200
-#define __NR_multiplexer       201
-#define __NR_getdents64                202
-#define __NR_pivot_root                203
-/* #define __NR_fcntl64                204     32bit only */
-#define __NR_madvise           205
-#define __NR_mincore           206
-#define __NR_gettid            207
-#define __NR_tkill             208
-#define __NR_setxattr          209
-#define __NR_lsetxattr         210
-#define __NR_fsetxattr         211
-#define __NR_getxattr          212
-#define __NR_lgetxattr         213
-#define __NR_fgetxattr         214
-#define __NR_listxattr         215
-#define __NR_llistxattr                216
-#define __NR_flistxattr                217
-#define __NR_removexattr       218
-#define __NR_lremovexattr      219
-#define __NR_fremovexattr      220
-#define __NR_futex             221
-#define __NR_sched_setaffinity 222
-#define __NR_sched_getaffinity 223
-/* 224 currently unused */
-#define __NR_tuxcall           225
-/* #define __NR_sendfile64     226     32bit only */
-#define __NR_io_setup          227
-#define __NR_io_destroy                228
-#define __NR_io_getevents      229
-#define __NR_io_submit         230
-#define __NR_io_cancel         231
-#define __NR_set_tid_address   232
-#define __NR_fadvise64         233
-#define __NR_exit_group                234
-#define __NR_lookup_dcookie    235
-#define __NR_epoll_create      236
-#define __NR_epoll_ctl         237
-#define __NR_epoll_wait                238
-#define __NR_remap_file_pages  239
-#define __NR_timer_create      240
-#define __NR_timer_settime     241
-#define __NR_timer_gettime     242
-#define __NR_timer_getoverrun  243
-#define __NR_timer_delete      244
-#define __NR_clock_settime     245
-#define __NR_clock_gettime     246
-#define __NR_clock_getres      247
-#define __NR_clock_nanosleep   248
-#define __NR_swapcontext       249
-#define __NR_tgkill            250
-#define __NR_utimes            251
-#define __NR_statfs64          252
-#define __NR_fstatfs64         253
-/* #define __NR_fadvise64_64   254     32bit only */
-#define __NR_rtas              255
-/* Number 256 is reserved for sys_debug_setcontext */
-/* Number 257 is reserved for vserver */
-/* 258 currently unused */
-#define __NR_mbind             259
-#define __NR_get_mempolicy     260
-#define __NR_set_mempolicy     261
-#define __NR_mq_open           262
-#define __NR_mq_unlink         263
-#define __NR_mq_timedsend      264
-#define __NR_mq_timedreceive   265
-#define __NR_mq_notify         266
-#define __NR_mq_getsetattr     267
-#define __NR_kexec_load                268
-#define __NR_add_key           269
-#define __NR_request_key       270
-#define __NR_keyctl            271
-#define __NR_waitid            272
-#define __NR_ioprio_set                273
-#define __NR_ioprio_get                274
-#define __NR_inotify_init      275
-#define __NR_inotify_add_watch 276
-#define __NR_inotify_rm_watch  277
-
-#define __NR_syscalls          278
-#ifdef __KERNEL__
-#define NR_syscalls    __NR_syscalls
-#endif
-
-#ifndef __ASSEMBLY__
-
-/* On powerpc a system call basically clobbers the same registers like a
- * function call, with the exception of LR (which is needed for the
- * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
- * an error return status).
- */
-
-#define __syscall_nr(nr, type, name, args...)                          \
-       unsigned long __sc_ret, __sc_err;                               \
-       {                                                               \
-               register unsigned long __sc_0  __asm__ ("r0");          \
-               register unsigned long __sc_3  __asm__ ("r3");          \
-               register unsigned long __sc_4  __asm__ ("r4");          \
-               register unsigned long __sc_5  __asm__ ("r5");          \
-               register unsigned long __sc_6  __asm__ ("r6");          \
-               register unsigned long __sc_7  __asm__ ("r7");          \
-               register unsigned long __sc_8  __asm__ ("r8");          \
-                                                                       \
-               __sc_loadargs_##nr(name, args);                         \
-               __asm__ __volatile__                                    \
-                       ("sc           \n\t"                            \
-                        "mfcr %0      "                                \
-                       : "=&r" (__sc_0),                               \
-                         "=&r" (__sc_3),  "=&r" (__sc_4),              \
-                         "=&r" (__sc_5),  "=&r" (__sc_6),              \
-                         "=&r" (__sc_7),  "=&r" (__sc_8)               \
-                       : __sc_asm_input_##nr                           \
-                       : "cr0", "ctr", "memory",                       \
-                               "r9", "r10","r11", "r12");              \
-               __sc_ret = __sc_3;                                      \
-               __sc_err = __sc_0;                                      \
-       }                                                               \
-       if (__sc_err & 0x10000000)                                      \
-       {                                                               \
-               errno = __sc_ret;                                       \
-               __sc_ret = -1;                                          \
-       }                                                               \
-       return (type) __sc_ret
-
-#define __sc_loadargs_0(name, dummy...)                                        \
-       __sc_0 = __NR_##name
-#define __sc_loadargs_1(name, arg1)                                    \
-       __sc_loadargs_0(name);                                          \
-       __sc_3 = (unsigned long) (arg1)
-#define __sc_loadargs_2(name, arg1, arg2)                              \
-       __sc_loadargs_1(name, arg1);                                    \
-       __sc_4 = (unsigned long) (arg2)
-#define __sc_loadargs_3(name, arg1, arg2, arg3)                                \
-       __sc_loadargs_2(name, arg1, arg2);                              \
-       __sc_5 = (unsigned long) (arg3)
-#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4)                  \
-       __sc_loadargs_3(name, arg1, arg2, arg3);                        \
-       __sc_6 = (unsigned long) (arg4)
-#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5)            \
-       __sc_loadargs_4(name, arg1, arg2, arg3, arg4);                  \
-       __sc_7 = (unsigned long) (arg5)
-#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6)      \
-       __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5);            \
-       __sc_8 = (unsigned long) (arg6)
-
-#define __sc_asm_input_0 "0" (__sc_0)
-#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
-#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
-#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
-#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
-#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
-#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
-
-#define _syscall0(type,name)                                           \
-type name(void)                                                                \
-{                                                                      \
-       __syscall_nr(0, type, name);                                    \
-}
-
-#define _syscall1(type,name,type1,arg1)                                        \
-type name(type1 arg1)                                                  \
-{                                                                      \
-       __syscall_nr(1, type, name, arg1);                              \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)                     \
-type name(type1 arg1, type2 arg2)                                      \
-{                                                                      \
-       __syscall_nr(2, type, name, arg1, arg2);                        \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)          \
-type name(type1 arg1, type2 arg2, type3 arg3)                          \
-{                                                                      \
-       __syscall_nr(3, type, name, arg1, arg2, arg3);                  \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4)              \
-{                                                                      \
-       __syscall_nr(4, type, name, arg1, arg2, arg3, arg4);            \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)  \
-{                                                                      \
-       __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5);      \
-}
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6)      \
-{                                                                      \
-       __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6);        \
-}
-
-#ifdef __KERNEL_SYSCALLS__
-
-/*
- * Forking from kernel space will result in the child getting a new,
- * empty kernel stack area.  Thus the child cannot access automatic
- * variables set in the parent unless they are in registers, and the
- * procedure where the fork was done cannot return to its caller in
- * the child.
- */
-
-/*
- * System call prototypes.
- */
-static inline _syscall3(int, execve, __const__ char *, file, char **, argv,
-                       char **,envp)
-
-#endif /* __KERNEL_SYSCALLS__ */
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-
-unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
-                      unsigned long flags, unsigned long fd, off_t offset);
-struct pt_regs;
-int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
-               unsigned long a3, unsigned long a4, unsigned long a5,
-               struct pt_regs *regs);
-int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
-               unsigned long p4, unsigned long p5, unsigned long p6,
-               struct pt_regs *regs);
-int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
-               unsigned long p4, unsigned long p5, unsigned long p6,
-               struct pt_regs *regs);
-int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
-               unsigned long p4, unsigned long p5, unsigned long p6,
-               struct pt_regs *regs);
-int sys_pipe(int __user *fildes);
-int sys_ptrace(long request, long pid, long addr, long data);
-struct sigaction;
-long sys_rt_sigaction(int sig, const struct sigaction __user *act,
-                     struct sigaction __user *oact, size_t sigsetsize);
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
-
-#endif         /* __KERNEL__ */
-
-#endif         /* __ASSEMBLY__ */
-
-#endif /* _ASM_PPC_UNISTD_H_ */
index 3ff7b92..51df337 100644 (file)
@@ -117,14 +117,16 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
                /*
                 * No locking required for CPU-local interrupts:
                 */
-               desc->handler->ack(irq);
+               if (desc->handler->ack)
+                       desc->handler->ack(irq);
                action_ret = handle_IRQ_event(irq, regs, desc->action);
                desc->handler->end(irq);
                return 1;
        }
 
        spin_lock(&desc->lock);
-       desc->handler->ack(irq);
+       if (desc->handler->ack)
+               desc->handler->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested