From: Arnd Bergmann
Date: Mon, 8 Apr 2013 16:59:19 +0000 (+0200)
Subject: Merge branch 'gic/cleanup' into next/soc2
X-Git-Tag: v5.15~20164^2~7
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=797b3a9ee790e8de2a34d427de96a1bb560fe0db;p=platform%2Fkernel%2Flinux-starfive.git

Merge branch 'gic/cleanup' into next/soc2

Both zynq and shmobile have conflicts against the gic cleanup
series, resolved here.

Conflicts:
	arch/arm/mach-shmobile/smp-emev2.c
	arch/arm/mach-shmobile/smp-r8a7779.c
	arch/arm/mach-shmobile/smp-sh73a0.c
	arch/arm/mach-zynq/platsmp.c
	drivers/gpio/gpio-pl061.c

Signed-off-by: Arnd Bergmann
---

797b3a9ee790e8de2a34d427de96a1bb560fe0db
diff --cc arch/arm/mach-shmobile/smp-emev2.c
index 8225c16,384e27d..e38691b
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@@ -31,14 -30,71 +30,9 @@@
  
  #define EMEV2_SCU_BASE 0x1e000000
  
- static void __cpuinit emev2_secondary_init(unsigned int cpu)
 -static DEFINE_SPINLOCK(scu_lock);
 -static void __iomem *scu_base;
 -
 -static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
--{
- 	gic_secondary_init(0);
 -	unsigned long tmp;
 -
 -	/* we assume this code is running on a different cpu
 -	 * than the one that is changing coherency setting */
 -	spin_lock(&scu_lock);
 -	tmp = readl(scu_base + 8);
 -	tmp &= ~clr;
 -	tmp |= set;
 -	writel(tmp, scu_base + 8);
 -	spin_unlock(&scu_lock);
 -
 -}
 -
 -static unsigned int __init emev2_get_core_count(void)
 -{
 -	if (!scu_base) {
 -		scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
 -		emev2_clock_init(); /* need ioremapped SMU */
 -	}
 -
 -	WARN_ON_ONCE(!scu_base);
 -
 -	return scu_base ? scu_get_core_count(scu_base) : 1;
 -}
 -
 -static int emev2_platform_cpu_kill(unsigned int cpu)
 -{
 -	return 0; /* not supported yet */
 -}
 -
 -static int __maybe_unused emev2_cpu_kill(unsigned int cpu)
 -{
 -	int k;
 -
 -	/* this function is running on another CPU than the offline target,
 -	 * here we need wait for shutdown code in platform_cpu_die() to
 -	 * finish before asking SoC-specific code to power off the CPU core.
 -	 */
 -	for (k = 0; k < 1000; k++) {
 -		if (shmobile_cpu_is_dead(cpu))
 -			return emev2_platform_cpu_kill(cpu);
 -		mdelay(1);
 -	}
 -
 -	return 0;
--}
 -
--
  static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
  {
 -	cpu = cpu_logical_map(cpu);
 -
 -	/* enable cache coherency */
 -	modify_scu_cpu_psr(0, 3 << (cpu * 8));
 -
 -	/* Tell ROM loader about our vector (in headsmp.S) */
 -	emev2_set_boot_vector(__pa(shmobile_secondary_vector));
 -
 -	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 +	arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
  	return 0;
  }
  
@@@ -69,6 -118,10 +63,5 @@@ static void __init emev2_smp_init_cpus(
  struct smp_operations emev2_smp_ops __initdata = {
  	.smp_init_cpus = emev2_smp_init_cpus,
  	.smp_prepare_cpus = emev2_smp_prepare_cpus,
- 	.smp_secondary_init = emev2_secondary_init,
  	.smp_boot_secondary = emev2_boot_secondary,
 -#ifdef CONFIG_HOTPLUG_CPU
 -	.cpu_kill = emev2_cpu_kill,
 -	.cpu_die = shmobile_cpu_die,
 -	.cpu_disable = shmobile_cpu_disable,
 -#endif
  };
diff --cc arch/arm/mach-shmobile/smp-r8a7779.c
index ea4535a,9949065..a853bf1
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@@ -23,10 -23,8 +23,9 @@@
  #include
  #include
  #include
- #include
  #include
  #include
 +#include
  #include
  #include
  #include
@@@ -82,11 -112,25 +81,6 @@@ static int r8a7779_platform_cpu_kill(un
  	return ret ? ret : 1;
  }
  
- static void __cpuinit r8a7779_secondary_init(unsigned int cpu)
 -static int __maybe_unused r8a7779_cpu_kill(unsigned int cpu)
--{
- 	gic_secondary_init(0);
 -	int k;
 -
 -	/* this function is running on another CPU than the offline target,
 -	 * here we need wait for shutdown code in platform_cpu_die() to
 -	 * finish before asking SoC-specific code to power off the CPU core.
 -	 */
 -	for (k = 0; k < 1000; k++) {
 -		if (shmobile_cpu_is_dead(cpu))
 -			return r8a7779_platform_cpu_kill(cpu);
 -
 -		mdelay(1);
 -	}
 -
 -	return 0;
--}
 -
--
  static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
  {
  	struct r8a7779_pm_ch *ch = NULL;
diff --cc arch/arm/mach-shmobile/smp-sh73a0.c
index 5ae502b,d0f9aca..bf79626
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@@ -49,11 -51,13 +48,6 @@@ void __init sh73a0_register_twd(void
  }
  #endif
  
- static void __cpuinit sh73a0_secondary_init(unsigned int cpu)
 -static unsigned int __init sh73a0_get_core_count(void)
--{
- 	gic_secondary_init(0);
 -	void __iomem *scu_base = scu_base_addr();
 -
 -	return scu_get_core_count(scu_base);
--}
--
  static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
  {
  	cpu = cpu_logical_map(cpu);
diff --cc arch/arm/mach-zynq/platsmp.c
index 3072cbd,0000000..5fc167e
mode 100644,000000..100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@@ -1,152 -1,0 +1,136 @@@
 +/*
 + * This file contains Xilinx specific SMP code, used to start up
 + * the second processor.
 + *
 + * Copyright (C) 2011-2013 Xilinx
 + *
 + * based on linux/arch/arm/mach-realview/platsmp.c
 + *
 + * Copyright (C) 2002 ARM Ltd.
 + *
 + * This software is licensed under the terms of the GNU General Public
 + * License version 2, as published by the Free Software Foundation, and
 + * may be copied, distributed, and modified under those terms.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 + * GNU General Public License for more details.
 + */
 +
 +#include
 +#include
 +#include
 +#include
 +#include
 +#include
 +#include
 +#include "common.h"
 +
 +/*
 + * Store number of cores in the system
 + * Because of scu_get_core_count() must be in __init section and can't
 + * be called from zynq_cpun_start() because it is in __cpuinit section.
 + */
 +static int ncores;
 +
- /* Secondary CPU kernel startup is a 2 step process. The primary CPU
-  * starts the secondary CPU by giving it the address of the kernel and
-  * then sending it an event to wake it up. The secondary CPU then
-  * starts the kernel and tells the primary CPU it's up and running.
-  */
- static void __cpuinit zynq_secondary_init(unsigned int cpu)
- {
- 	/*
- 	 * if any interrupts are already enabled for the primary
- 	 * core (e.g. timer irq), then they will not have been enabled
- 	 * for us: do so
- 	 */
- 	gic_secondary_init(0);
- }
- 
 +int __cpuinit zynq_cpun_start(u32 address, int cpu)
 +{
 +	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
 +						&zynq_secondary_trampoline;
 +
 +	if (cpu > ncores) {
 +		pr_warn("CPU No. is not available in the system\n");
 +		return -1;
 +	}
 +
 +	/* MS: Expectation that SLCR are directly map and accessible */
 +	/* Not possible to jump to non aligned address */
 +	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
 +		/* Store pointer to ioremap area which points to address 0x0 */
 +		static u8 __iomem *zero;
 +		u32 trampoline_size = &zynq_secondary_trampoline_jump -
 +						&zynq_secondary_trampoline;
 +
 +		zynq_slcr_cpu_stop(cpu);
 +
 +		if (__pa(PAGE_OFFSET)) {
 +			zero = ioremap(0, trampoline_code_size);
 +			if (!zero) {
 +				pr_warn("BOOTUP jump vectors not accessible\n");
 +				return -1;
 +			}
 +		} else {
 +			zero = (__force u8 __iomem *)PAGE_OFFSET;
 +		}
 +
 +		/*
 +		 * This is elegant way how to jump to any address
 +		 * 0x0: Load address at 0x8 to r0
 +		 * 0x4: Jump by mov instruction
 +		 * 0x8: Jumping address
 +		 */
 +		memcpy((__force void *)zero, &zynq_secondary_trampoline,
 +						trampoline_size);
 +		writel(address, zero + trampoline_size);
 +
 +		flush_cache_all();
 +		outer_flush_range(0, trampoline_code_size);
 +		smp_wmb();
 +
 +		if (__pa(PAGE_OFFSET))
 +			iounmap(zero);
 +
 +		zynq_slcr_cpu_start(cpu);
 +
 +		return 0;
 +	}
 +
 +	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);
 +
 +	return -1;
 +}
 +EXPORT_SYMBOL(zynq_cpun_start);
 +
 +static int __cpuinit zynq_boot_secondary(unsigned int cpu,
 +					 struct task_struct *idle)
 +{
 +	return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
 +}
 +
 +/*
 + * Initialise the CPU possible map early - this describes the CPUs
 + * which may be present or become present in the system.
 + */
 +static void __init zynq_smp_init_cpus(void)
 +{
 +	int i;
 +
 +	ncores = scu_get_core_count(zynq_scu_base);
 +
 +	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
 +		set_cpu_possible(i, true);
 +}
 +
 +static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
 +{
 +	int i;
 +
 +	/*
 +	 * Initialise the present map, which describes the set of CPUs
 +	 * actually populated at the present time.
 +	 */
 +	for (i = 0; i < max_cpus; i++)
 +		set_cpu_present(i, true);
 +
 +	scu_enable(zynq_scu_base);
 +}
 +
 +struct smp_operations zynq_smp_ops __initdata = {
 +	.smp_init_cpus = zynq_smp_init_cpus,
 +	.smp_prepare_cpus = zynq_smp_prepare_cpus,
- 	.smp_secondary_init = zynq_secondary_init,
 +	.smp_boot_secondary = zynq_boot_secondary,
 +#ifdef CONFIG_HOTPLUG_CPU
 +	.cpu_die = zynq_platform_cpu_die,
 +#endif
 +};
diff --cc drivers/gpio/gpio-pl061.c
index 06ed257,2976336..5c06f30
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@@ -15,7 -15,7 +15,8 @@@
  #include
  #include
  #include
+ #include
 +#include
  #include
  #include
  #include
@@@ -23,9 -23,7 +24,8 @@@
  #include
  #include
  #include
 +#include
  #include
- #include
  
  #define GPIODIR 0x400
  #define GPIOIS 0x404