// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
#include <linux/delay.h>
#include <asm/arch-fsl-layerscape/soc.h>
#include <efi_loader.h>

DECLARE_GLOBAL_DATA_PTR;
void *get_spin_tbl_addr(void)
{
	/* the spin table is at the beginning of the secondary boot code */
	return secondary_boot_code_start;
}
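/*
 * Illustrative sketch, not from the original sources: each spin-table entry
 * occupies SPIN_TABLE_ELEM_SIZE bytes and is accessed as an array of u64
 * words via the SPIN_TABLE_ELEM_*_IDX macros.  For the core at spin-table
 * position "pos":
 *
 *	u64 *entry = (u64 *)get_spin_tbl_addr() +
 *		     pos * WORDS_PER_SPIN_TABLE_ENTRY;
 *	entry[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]	- address the core jumps to
 *	entry[SPIN_TABLE_ELEM_STATUS_IDX]	- non-zero once the core is up
 *	entry[SPIN_TABLE_ELEM_LPID_IDX]		- logical partition id
 *	entry[SPIN_TABLE_ELEM_ARCH_COMP_IDX]	- OS_ARCH_SAME or OS_ARCH_DIFF
 */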
void update_os_arch_secondary_cores(uint8_t os_arch)
{
	u64 *table = get_spin_tbl_addr();
	int i;

	for (i = 1; i < CONFIG_MAX_CPUS; i++) {
		if (os_arch == IH_ARCH_DEFAULT)
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_SAME;
		else
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_DIFF;
	}
}
#ifdef CONFIG_FSL_LSCH3
static void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 mpidr;

	mpidr = ((cluster << 8) | core);
	/*
	 * The mpidr_el1 register value of the core which needs to be released
	 * is written to the scratchrw[6] register.
	 */
	gur_out32(&gur->scratchrw[6], mpidr);
	asm volatile("dsb st" : : : "memory");
	rst->brrl |= 1 << ((cluster * cluster_cores) + core);
	asm volatile("dsb st" : : : "memory");
	/*
	 * The scratchrw[6] register is then polled; once its value becomes
	 * zero the core is up and running and the next core can be released.
	 */
	while (gur_in32(&gur->scratchrw[6]) != 0)
		;
}
#endif
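/*
 * Worked example (illustrative values, not from the original sources): with
 * four ARM cores per cluster (cluster_cores == 4), releasing cluster 1,
 * core 2 writes mpidr = (1 << 8) | 2 = 0x102 to scratchrw[6] and sets
 * boot-release bit 1 * 4 + 2 = 6 in rst->brrl, then spins until the woken
 * core clears scratchrw[6] again.
 */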
int fsl_layerscape_wake_seconday_cores(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 svr, ver, cluster, type;
	int j = 0, cluster_cores = 0;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
#endif
	u32 cores, cpu_up_mask = 1;
	int i, timeout = 10;
	u64 *table;
#ifdef CONFIG_EFI_LOADER
	u64 reloc_addr = U32_MAX;
	efi_status_t ret;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	/* update for secondary cores */
	__real_cntfrq = COUNTER_FREQUENCY_REAL;
	flush_dcache_range((unsigned long)&__real_cntfrq,
			   (unsigned long)&__real_cntfrq + 8);
#endif
#ifdef CONFIG_EFI_LOADER
	/*
	 * EFI will reserve 64kb for its runtime services. This will probably
	 * overlap with our spin table code, which is why we have to relocate
	 * it.
	 * Keep this after the __real_cntfrq update, so we have it when we
	 * copy the complete section here.
	 */
	ret = efi_allocate_pages(EFI_ALLOCATE_MAX_ADDRESS,
				 EFI_RESERVED_MEMORY_TYPE,
				 efi_size_in_pages(secondary_boot_code_size),
				 &reloc_addr);
	if (ret == EFI_SUCCESS) {
		debug("Relocating spin table from %llx to %llx (size %lx)\n",
		      (u64)secondary_boot_code_start, reloc_addr,
		      secondary_boot_code_size);
		memcpy((void *)reloc_addr, secondary_boot_code_start,
		       secondary_boot_code_size);
		flush_dcache_range(reloc_addr,
				   reloc_addr + secondary_boot_code_size);

		/* set new entry point for secondary cores */
		secondary_boot_addr += (void *)reloc_addr -
				       secondary_boot_code_start;
		flush_dcache_range((unsigned long)&secondary_boot_addr,
				   (unsigned long)&secondary_boot_addr + 8);

		/* this will be used to reserve the memory */
		secondary_boot_code_start = (void *)reloc_addr;
	}
#endif
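	/*
	 * Worked example (illustrative addresses, not from the original
	 * sources): if secondary_boot_code_start was 0x80000000 and EFI
	 * returned reloc_addr = 0x80100000, the 0x100000 byte delta above is
	 * also added to secondary_boot_addr, so the secondary cores enter the
	 * relocated copy rather than the original location, which EFI may
	 * overwrite.
	 */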
	cores = cpu_mask();
	/* Clear spin table so that secondary processors
	 * observe the correct value after waking up from wfe.
	 */
	table = get_spin_tbl_addr();
	memset(table, 0, CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE);
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table +
			   (CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE));

	debug("Waking secondary cores to start from %lx\n", gd->relocaddr);
#ifdef CONFIG_FSL_LSCH3
	gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
	gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);
	if (ver == SVR_LS2080A || ver == SVR_LS2085A) {
		gur_out32(&gur->scratchrw[6], 1);
		asm volatile("dsb st" : : : "memory");
		rst->brrl = cores;
		asm volatile("dsb st" : : : "memory");
	} else {
		/*
		 * Release the cores out of reset one-at-a-time to avoid
		 * racing on the scratchrw[6] handshake register.
		 */
		i = 0;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type &&
			    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
				cluster_cores++;
		}

		do {
			cluster = in_le32(&gur->tp_cluster[i].lower);
			for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
				type = initiator_type(cluster, j);
				if (type &&
				    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					wake_secondary_core_n(i, j,
							      cluster_cores);
			}
			i++;
		} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
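		/*
		 * Illustrative walk-through (assumption, not from the original
		 * sources): on a part with two clusters of four ARM cores
		 * each, the first loop over gur->tp_cluster[0].lower yields
		 * cluster_cores = 4; the do/while loop then releases the
		 * cores of cluster 0 and cluster 1 in turn and stops once the
		 * cluster descriptor carries the TP_CLUSTER_EOC flag.
		 */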
	}
#elif defined(CONFIG_FSL_LSCH2)
	scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
	scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
	asm volatile("dsb st" : : : "memory");
	gur_out32(&gur->brrl, cores);
	asm volatile("dsb st" : : : "memory");

	/* Boot up online cores */
	scfg_out32(&scfg->corebcr, cores);
#endif
	/* This is needed as a precautionary measure.
	 * If some code before this has accidentally released the secondary
	 * cores then the pre-bootloader code will trap them in a "wfe" unless
	 * the scratchrw[6] is set. In this case we need a sev here to get
	 * these cores moving again.
	 */
	asm volatile("sev");

	while (timeout--) {
		flush_dcache_range((unsigned long)table, (unsigned long)table +
				   CONFIG_MAX_CPUS * 64);
		for (i = 1; i < CONFIG_MAX_CPUS; i++) {
			if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
					SPIN_TABLE_ELEM_STATUS_IDX])
				cpu_up_mask |= 1 << i;
		}
		if (hweight32(cpu_up_mask) == hweight32(cores))
			break;
		udelay(10);
	}
	if (timeout <= 0) {
		printf("CPU: Failed to bring up some cores (mask 0x%x)\n",
		       cores ^ cpu_up_mask);
		return 1;
	}
	printf("CPU: %d cores online\n", hweight32(cores));

	return 0;
}
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}

static int is_pos_valid(unsigned int pos)
{
	return !!((1 << pos) & cpu_pos_mask());
}
int is_core_online(u64 cpu_id)
{
	u64 *table = get_spin_tbl_addr();
	int pos = id_to_core(cpu_id);

	table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
	return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}
int cpu_reset(u32 nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}

int cpu_disable(u32 nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
static int core_to_pos(int nr)
{
	u32 cores = cpu_pos_mask();
	int i, count = 0;

	if (nr == 0) {
		return 0;
	} else if (nr >= hweight32(cores)) {
		puts("Not a valid core number.\n");
		return -1;
	}

	for (i = 1; i < 32; i++) {
		if (is_pos_valid(i)) {
			count++;
			if (count == nr)
				break;
		}
	}

	if (count != nr)
		return -1;

	return i;
}
int cpu_status(u32 nr)
{
	u64 *table = get_spin_tbl_addr();
	int pos;

	if (nr == 0) {
		printf("table base @ 0x%p\n", table);
	} else {
		pos = core_to_pos(nr);
		if (pos < 0)
			return -1;
		table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
		printf("table @ 0x%p\n", table);
		printf("   addr   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
		printf("   status - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_STATUS_IDX]);
		printf("   lpid   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_LPID_IDX]);
	}

	return 0;
}
int cpu_release(u32 nr, int argc, char *const argv[])
{
	u64 boot_addr;
	u64 *table = get_spin_tbl_addr();
	int pos;

	pos = core_to_pos(nr);
	if (pos <= 0)
		return -1;

	table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
	boot_addr = simple_strtoull(argv[0], NULL, 16);
	table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
	asm volatile("dsb st");

	/*
	 * The secondary CPUs poll the spin-table above for a non-zero value.
	 * To save power they wait in "wfe", so issue "sev" here to wake them
	 * and let them check the spin-table again (see the slave_cpu loop in
	 * lowlevel.S).
	 */
	asm volatile("sev");

	return 0;
}
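/*
 * Illustrative usage (assumption, not from the original sources): the hooks
 * above back the "cpu" shell command, e.g.
 *
 *	=> cpu 1 status
 *	=> cpu 1 release 0x80080000
 *
 * The release command stores 0x80080000 in the core's
 * SPIN_TABLE_ELEM_ENTRY_ADDR_IDX word, flushes it, and issues "sev" so the
 * core leaves its wfe loop and jumps to that address.
 */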