// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <clock_legacy.h>
#include <cpu_func.h>
#include <image.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/psci.h>
#include <malloc.h>
#include <asm/arch-fsl-layerscape/soc.h>
#include <efi_loader.h>

DECLARE_GLOBAL_DATA_PTR;
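
/*
 * The spin table sits at the start of the secondary boot code section
 * (see get_spin_tbl_addr() below). Each core owns one entry of
 * WORDS_PER_SPIN_TABLE_ENTRY 64-bit words, indexed by the
 * SPIN_TABLE_ELEM_*_IDX macros used throughout this file: entry address,
 * status, LPID and an OS architecture compatibility flag.
 */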
void *get_spin_tbl_addr(void)
{
	/* the spin table is at the beginning */
	return secondary_boot_code_start;
}
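
/*
 * Mark each secondary core's spin-table entry with whether the OS image
 * architecture matches the default one (IH_ARCH_DEFAULT), so the secondary
 * boot path can react to a mismatch (e.g. a 32-bit OS image) before handing
 * the core over to the OS.
 */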
void update_os_arch_secondary_cores(uint8_t os_arch)
{
	u64 *table = get_spin_tbl_addr();
	int i;

	for (i = 1; i < CONFIG_MAX_CPUS; i++) {
		if (os_arch == IH_ARCH_DEFAULT)
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_SAME;
		else
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_DIFF;
	}
}
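
/*
 * LSCH3 release handshake: the MPIDR of the core to be released is parked
 * in scratchrw[6], the core is taken out of reset via the boot release
 * register, and scratchrw[6] reads back as zero once the core is up, at
 * which point the next core may be released.
 */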
#ifdef CONFIG_FSL_LSCH3
static void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
	struct ccsr_reset __iomem *rst = (void *)(CFG_SYS_FSL_RST_ADDR);
	u32 mpidr = 0;

	mpidr = ((cluster << 8) | core);
	/*
	 * The mpidr_el1 register value of the core which needs to be
	 * released is written to the scratchrw[6] register.
	 */
	gur_out32(&gur->scratchrw[6], mpidr);
	asm volatile("dsb st" : : : "memory");
	rst->brrl |= 1 << ((cluster * cluster_cores) + core);
	asm volatile("dsb st" : : : "memory");
	/*
	 * The scratchrw[6] register value is polled: when it becomes zero,
	 * this core is up and running and the next core can be released.
	 */
	while (gur_in32(&gur->scratchrw[6]) != 0)
		;
}
#endif
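
/*
 * Bring the secondary cores out of reset and park them in the spin table:
 * publish the counter frequency, relocate the spin table if the EFI loader
 * may claim that region, clear and flush the table, program the boot
 * location pointer, release the cores (per SoC family) and poll the
 * per-core status words until every expected core has checked in.
 */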
int fsl_layerscape_wake_seconday_cores(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
	struct ccsr_reset __iomem *rst = (void *)(CFG_SYS_FSL_RST_ADDR);
	u32 svr, ver, cluster, type;
	int j = 0, cluster_cores = 0;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_scfg __iomem *scfg = (void *)(CFG_SYS_FSL_SCFG_ADDR);
#endif
	u32 cores, cpu_up_mask = 1;
	int i, timeout = 10;
	u64 *table;
#ifdef CONFIG_EFI_LOADER
	void *reloc_addr;
#endif

#ifdef COUNTER_FREQUENCY_REAL
	/* update for secondary cores */
	__real_cntfrq = COUNTER_FREQUENCY_REAL;
	flush_dcache_range((unsigned long)&__real_cntfrq,
			   (unsigned long)&__real_cntfrq + 8);
#endif

#ifdef CONFIG_EFI_LOADER
	/*
	 * EFI will reserve 64kb for its runtime services. This will probably
	 * overlap with our spin table code, which is why we have to relocate
	 * it.
	 * Keep this after the __real_cntfrq update, so we have it when we
	 * copy the complete section here.
	 */
	reloc_addr = memalign(PAGE_SIZE,
			      round_up(secondary_boot_code_size, PAGE_SIZE));
	if (reloc_addr) {
		debug("Relocating spin table from %p to %p (size %lx)\n",
		      secondary_boot_code_start, reloc_addr,
		      secondary_boot_code_size);
		memcpy(reloc_addr, secondary_boot_code_start,
		       secondary_boot_code_size);
		flush_dcache_range((unsigned long)reloc_addr,
				   (unsigned long)reloc_addr +
				   secondary_boot_code_size);

		/* set new entry point for secondary cores */
		secondary_boot_addr += reloc_addr -
				       secondary_boot_code_start;
		flush_dcache_range((unsigned long)&secondary_boot_addr,
				   (unsigned long)&secondary_boot_addr + 8);

		/* this will be used to reserve the memory */
		secondary_boot_code_start = reloc_addr;
	}
#endif

	cores = cpu_mask();
	/* Clear spin table so that secondary processors
	 * observe the correct value after waking up from wfe.
	 */
	table = get_spin_tbl_addr();
	memset(table, 0, CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table +
			   (CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE));

	debug("Waking secondary cores to start from %lx\n", gd->relocaddr);
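
	/*
	 * Release the cores. On LSCH3 the entry point is programmed into the
	 * GUR boot location pointer; LS2080A/LS2085A release all cores at
	 * once, other SoCs release them one at a time through
	 * wake_secondary_core_n(). On LSCH2 the entry point goes into the
	 * SCFG scratch registers and the cores are released via the boot
	 * release and core boot control registers.
	 */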
#ifdef CONFIG_FSL_LSCH3
	gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
	gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);
	if (ver == SVR_LS2080A || ver == SVR_LS2085A) {
		gur_out32(&gur->scratchrw[6], 1);
		asm volatile("dsb st" : : : "memory");
		rst->brrl = cores;
		asm volatile("dsb st" : : : "memory");
	} else {
		/*
		 * Release the cores out of reset one-at-a-time to avoid
		 * power spikes
		 */
		i = 0;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type &&
			    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
				cluster_cores++;
		}

		do {
			cluster = in_le32(&gur->tp_cluster[i].lower);
			for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
				type = initiator_type(cluster, j);
				if (type &&
				    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					wake_secondary_core_n(i, j,
							      cluster_cores);
			}
			i++;
		} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
	}
#elif defined(CONFIG_FSL_LSCH2)
	scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
	scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
	asm volatile("dsb st" : : : "memory");
	gur_out32(&gur->brrl, cores);
	asm volatile("dsb st" : : : "memory");

	/* Bootup online cores */
	scfg_out32(&scfg->corebcr, cores);
#endif
	/* This is needed as a precautionary measure.
	 * If some code before this has accidentally released the secondary
	 * cores then the pre-bootloader code will trap them in a "wfe" unless
	 * the scratchrw[6] is set. In this case we need a sev here to get these
	 * cores moving again.
	 */
	asm volatile("sev");

	while (timeout--) {
		flush_dcache_range((unsigned long)table, (unsigned long)table +
				   CONFIG_MAX_CPUS * 64);
		for (i = 1; i < CONFIG_MAX_CPUS; i++) {
			if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
					SPIN_TABLE_ELEM_STATUS_IDX])
				cpu_up_mask |= 1 << i;
		}
		if (hweight32(cpu_up_mask) == hweight32(cores))
			break;
		udelay(10);
	}
	if (timeout <= 0) {
		printf("CPU: Failed to bring up some cores (mask 0x%x)\n",
		       cores ^ cpu_up_mask);
		return 1;
	}
	printf("CPU: %d cores online\n", hweight32(cores));

	return 0;
}
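
/*
 * The helpers below use two numberings: a core number, checked against
 * cpu_mask(), and a "pos", the core's slot in the spin table, checked
 * against cpu_pos_mask(). core_to_pos() translates the former into the
 * latter.
 */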
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}

static int is_pos_valid(unsigned int pos)
{
	return !!((1 << pos) & cpu_pos_mask());
}

int is_core_online(u64 cpu_id)
{
	u64 *table = get_spin_tbl_addr();
	int pos = id_to_core(cpu_id);
	table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
	return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}
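
/*
 * Per-core reset and disable are not implemented for these SoCs; the
 * corresponding operations of the "cpu" shell command just print a notice.
 */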
int cpu_reset(u32 nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}

int cpu_disable(u32 nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
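
/*
 * Map a core number to its position in the spin table by walking
 * cpu_pos_mask() and counting valid positions; returns -1 when the core
 * number is out of range or no matching position exists.
 */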
static int core_to_pos(int nr)
{
	u32 cores = cpu_pos_mask();
	int i, count = 0;

	if (nr == 0) {
		return 0;
	} else if (nr >= hweight32(cores)) {
		puts("Not a valid core number.\n");
		return -1;
	}

	for (i = 1; i < 32; i++) {
		if (is_pos_valid(i)) {
			count++;
			if (count == nr)
				break;
		}
	}

	if (count != nr)
		return -1;

	return i;
}
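
/*
 * Dump the spin-table entry of a core for the "cpu status" shell command:
 * the table base for core 0, otherwise the entry address, status and LPID
 * words of the selected core.
 */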
int cpu_status(u32 nr)
{
	u64 *table = get_spin_tbl_addr();
	int pos;

	if (nr == 0) {
		printf("table base @ 0x%p\n", table);
	} else {
		pos = core_to_pos(nr);
		if (pos < 0)
			return -1;
		table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
		printf("table @ 0x%p\n", table);
		printf("   addr   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
		printf("   status - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_STATUS_IDX]);
		printf("   lpid   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_LPID_IDX]);
	}

	return 0;
}
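
/*
 * Release one core to a given entry point ("cpu <num> release <addr>" from
 * the U-Boot shell). When the spin table is in use, the address is written
 * into the core's entry and "sev" wakes the parked core; otherwise the
 * core is started through the PSCI CPU_ON call.
 */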
int cpu_release(u32 nr, int argc, char *const argv[])
{
	u64 boot_addr;
	u64 *table = get_spin_tbl_addr();
	int pos;
	int ret;

	boot_addr = simple_strtoull(argv[0], NULL, 16);

	if (check_psci()) {
		/* SPIN Table is used */
		pos = core_to_pos(nr);
		if (pos <= 0)
			return -1;

		table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
		table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
		flush_dcache_range((unsigned long)table,
				   (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
		asm volatile("dsb st");

		/*
		 * The secondary CPUs poll the spin table above for a non-zero
		 * value. To save power, "wfe" is called, so call "sev" here to
		 * wake the CPUs and let them check the spin table again (see
		 * the slave_cpu loop in lowlevel.S).
		 */
		asm volatile("sev");
	} else {
		/* Use PSCI to kick the core */
		printf("begin to kick cpu core #%d to address %llx\n",
		       nr, boot_addr);
		ret = invoke_psci_fn(PSCI_0_2_FN64_CPU_ON, nr, boot_addr, 0);
		if (ret)
			return -1;
	}

	return 0;
}