// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <clock_legacy.h>
#include <cpu_func.h>
#include <image.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/psci.h>
#include <malloc.h>
#include "cpu.h"
#include <asm/arch-fsl-layerscape/soc.h>

DECLARE_GLOBAL_DATA_PTR;

void *get_spin_tbl_addr(void)
{
        /* The spin table lives at the start of the secondary boot code */
        return secondary_boot_code_start;
}
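
/*
 * Each spin-table entry is SPIN_TABLE_ELEM_SIZE bytes, i.e.
 * WORDS_PER_SPIN_TABLE_ENTRY u64 words (see asm/arch/mp.h):
 *
 *   table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]  entry point, set by cpu_release()
 *   table[SPIN_TABLE_ELEM_STATUS_IDX]      set non-zero by a core once it is up
 *   table[SPIN_TABLE_ELEM_LPID_IDX]        logical partition ID
 *   table[SPIN_TABLE_ELEM_ARCH_COMP_IDX]   OS_ARCH_SAME or OS_ARCH_DIFF
 *
 * A minimal sketch of addressing core n's entry with these indices:
 *
 *   u64 *entry = (u64 *)get_spin_tbl_addr() + n * WORDS_PER_SPIN_TABLE_ENTRY;
 *   bool awake = entry[SPIN_TABLE_ELEM_STATUS_IDX] != 0;
 */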

/*
 * Record in each secondary core's spin-table entry whether the OS image
 * to be booted runs the same architecture as U-Boot (OS_ARCH_SAME) or a
 * different one (OS_ARCH_DIFF).
 */
void update_os_arch_secondary_cores(uint8_t os_arch)
{
        u64 *table = get_spin_tbl_addr();
        int i;

        for (i = 1; i < CONFIG_MAX_CPUS; i++) {
                if (os_arch == IH_ARCH_DEFAULT)
                        table[i * WORDS_PER_SPIN_TABLE_ENTRY +
                                SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_SAME;
                else
                        table[i * WORDS_PER_SPIN_TABLE_ENTRY +
                                SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_DIFF;
        }

        /*
         * The secondary cores poll the table with caches disabled, so
         * push the update out to memory.
         */
        flush_dcache_range((unsigned long)table,
                           (unsigned long)table +
                           (CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE));
}

#ifdef CONFIG_FSL_LSCH3
static void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
        struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
        struct ccsr_reset __iomem *rst = (void *)(CFG_SYS_FSL_RST_ADDR);
        u32 mpidr = (cluster << 8) | core;

        /*
         * Write the MPIDR_EL1 value of the core to be released to the
         * scratchrw[6] register.
         */
        gur_out32(&gur->scratchrw[6], mpidr);
        asm volatile("dsb st" : : : "memory");
        /*
         * Release the core from reset: its BRR bit index is
         * cluster * cluster_cores + core, e.g. bit 6 for cluster 1,
         * core 2 with four cores per cluster.
         */
        rst->brrl |= 1 << ((cluster * cluster_cores) + core);
        asm volatile("dsb st" : : : "memory");
        /*
         * Poll scratchrw[6]: once it reads zero, the core is up and
         * running and the next core can be released.
         */
        while (gur_in32(&gur->scratchrw[6]) != 0)
                ;
}
#endif

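/*
 * Release every secondary core from reset and wait for each one to
 * report itself up in the spin table. Returns 0 on success, 1 if any
 * core failed to come up. (In the layerscape boot flow this is
 * normally invoked once from the early architecture init code.)
 */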
int fsl_layerscape_wake_seconday_cores(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
        struct ccsr_reset __iomem *rst = (void *)(CFG_SYS_FSL_RST_ADDR);
        u32 svr, ver, cluster, type;
        int j = 0, cluster_cores = 0;
#elif defined(CONFIG_FSL_LSCH2)
        struct ccsr_scfg __iomem *scfg = (void *)(CFG_SYS_FSL_SCFG_ADDR);
#endif
        u32 cores, cpu_up_mask = 1;
        int i, timeout = 10;
        u64 *table;
#ifdef CONFIG_EFI_LOADER
        void *reloc_addr;
#endif

#ifdef COUNTER_FREQUENCY_REAL
        /* Update the counter frequency seen by the secondary cores */
        __real_cntfrq = COUNTER_FREQUENCY_REAL;
        flush_dcache_range((unsigned long)&__real_cntfrq,
                           (unsigned long)&__real_cntfrq + 8);
#endif

#ifdef CONFIG_EFI_LOADER
        /*
         * EFI will reserve 64 KiB for its runtime services. This would
         * likely overlap with our spin-table code, which is why we have
         * to relocate it.
         * Keep this after the __real_cntfrq update, so that the updated
         * value is included when the complete section is copied here.
         */
        reloc_addr = memalign(PAGE_SIZE,
                              round_up(secondary_boot_code_size, PAGE_SIZE));
        if (reloc_addr) {
                debug("Relocating spin table from %p to %p (size %lx)\n",
                      secondary_boot_code_start, reloc_addr,
                      secondary_boot_code_size);
                memcpy(reloc_addr, secondary_boot_code_start,
                       secondary_boot_code_size);
                flush_dcache_range((unsigned long)reloc_addr,
                                   (unsigned long)reloc_addr +
                                                  secondary_boot_code_size);

                /* Set the new entry point for the secondary cores */
                secondary_boot_addr += reloc_addr -
                                       secondary_boot_code_start;
                flush_dcache_range((unsigned long)&secondary_boot_addr,
                                   (unsigned long)&secondary_boot_addr + 8);

                /* This is later used to reserve the memory */
                secondary_boot_code_start = reloc_addr;
        }
#endif

        cores = cpu_mask();
        /*
         * Clear the spin table so that the secondary processors observe
         * the correct value after waking up from wfe.
         */
        table = get_spin_tbl_addr();
        memset(table, 0, CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
        flush_dcache_range((unsigned long)table,
                           (unsigned long)table +
                           (CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE));

        debug("Waking secondary cores to start from %lx\n", gd->relocaddr);

#ifdef CONFIG_FSL_LSCH3
        gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
        gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);

        svr = gur_in32(&gur->svr);
        ver = SVR_SOC_VER(svr);
        if (ver == SVR_LS2080A || ver == SVR_LS2085A) {
                gur_out32(&gur->scratchrw[6], 1);
                asm volatile("dsb st" : : : "memory");
                rst->brrl = cores;
                asm volatile("dsb st" : : : "memory");
        } else {
                /*
                 * Release the cores out of reset one at a time to avoid
                 * power spikes
                 */
                i = 0;
                /*
                 * Count the ARM cores in the first cluster; every cluster
                 * is assumed to contain the same number of cores.
                 */
                cluster = in_le32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type &&
                            TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                                cluster_cores++;
                }

                do {
                        cluster = in_le32(&gur->tp_cluster[i].lower);
                        for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                                type = initiator_type(cluster, j);
                                if (type &&
                                    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                                        wake_secondary_core_n(i, j,
                                                              cluster_cores);
                        }
                        i++;
                } while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
        }
#elif defined(CONFIG_FSL_LSCH2)
        scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
        scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
        asm volatile("dsb st" : : : "memory");
        gur_out32(&gur->brrl, cores);
        asm volatile("dsb st" : : : "memory");

        /* Boot up the online cores */
        scfg_out32(&scfg->corebcr, cores);
#endif
        /*
         * This is needed as a precautionary measure. If some code before
         * this point has accidentally released the secondary cores, the
         * pre-bootloader code will trap them in a "wfe" unless
         * scratchrw[6] is set. In that case we need a sev here to get
         * those cores moving again.
         */
        asm volatile("sev");

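        /*
         * The secondary cores update their status words with caches
         * disabled, so flush the table range before each scan to make
         * their writes visible here.
         */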
        while (timeout--) {
                flush_dcache_range((unsigned long)table, (unsigned long)table +
                                   CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
                for (i = 1; i < CONFIG_MAX_CPUS; i++) {
                        if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
                                        SPIN_TABLE_ELEM_STATUS_IDX])
                                cpu_up_mask |= 1 << i;
                }
                if (hweight32(cpu_up_mask) == hweight32(cores))
                        break;
                udelay(10);
        }
        if (hweight32(cpu_up_mask) != hweight32(cores)) {
                printf("CPU:   Failed to bring up some cores (mask 0x%x)\n",
                       cores ^ cpu_up_mask);
                return 1;
        }
        printf("CPU:   %d cores online\n", hweight32(cores));

        return 0;
}

int is_core_valid(unsigned int core)
{
        return !!((1 << core) & cpu_mask());
}

static int is_pos_valid(unsigned int pos)
{
        return !!((1 << pos) & cpu_pos_mask());
}

int is_core_online(u64 cpu_id)
{
        u64 *table = get_spin_tbl_addr();
        int pos = id_to_core(cpu_id);

        table += pos * WORDS_PER_SPIN_TABLE_ENTRY;

        return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}

int cpu_reset(u32 nr)
{
        puts("Feature is not implemented.\n");

        return 0;
}

int cpu_disable(u32 nr)
{
        puts("Feature is not implemented.\n");

        return 0;
}

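/*
 * Map a logical core number to its position in the spin table by
 * counting set bits in cpu_pos_mask(). Worked example with a purely
 * hypothetical mask: if cpu_pos_mask() returned 0x31 (positions 0, 4
 * and 5), core 1 would map to position 4 and core 2 to position 5.
 */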
static int core_to_pos(int nr)
{
        u32 cores = cpu_pos_mask();
        int i, count = 0;

        if (nr == 0) {
                return 0;
        } else if (nr >= hweight32(cores)) {
                puts("Not a valid core number.\n");
                return -1;
        }

        for (i = 1; i < 32; i++) {
                if (is_pos_valid(i)) {
                        count++;
                        if (count == nr)
                                break;
                }
        }

        if (count != nr)
                return -1;

        return i;
}

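/*
 * Print the spin-table entry for one core. This backs the "cpu status"
 * shell command (assuming the usual cmd/mp.c "cpu" command is enabled);
 * nr == 0 only reports the table base address.
 */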
int cpu_status(u32 nr)
{
        u64 *table = get_spin_tbl_addr();
        int pos;

        if (nr == 0) {
                printf("table base @ 0x%p\n", table);
        } else {
                pos = core_to_pos(nr);
                if (pos < 0)
                        return -1;
                table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
                printf("table @ 0x%p\n", table);
                printf("   addr   - 0x%016llx\n",
                       table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
                printf("   status - 0x%016llx\n",
                       table[SPIN_TABLE_ELEM_STATUS_IDX]);
                printf("   lpid   - 0x%016llx\n",
                       table[SPIN_TABLE_ELEM_LPID_IDX]);
        }

        return 0;
}

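/*
 * Release one held core and send it to boot_addr: via the spin table
 * when PSCI is absent, else via a PSCI CPU_ON call. Typical use from
 * the U-Boot shell (the address is illustrative):
 *
 *   => cpu 1 release 0x80080000
 */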
int cpu_release(u32 nr, int argc, char *const argv[])
{
        u64 boot_addr;
        u64 *table = get_spin_tbl_addr();
        int pos;
        int ret;

        boot_addr = simple_strtoull(argv[0], NULL, 16);

        if (check_psci()) {
                /* No PSCI support; release the core via the spin table */
                pos = core_to_pos(nr);
                if (pos <= 0)   /* invalid, or the boot core itself */
                        return -1;

                table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
                table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
                flush_dcache_range((unsigned long)table,
                                   (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
                asm volatile("dsb st");

                /*
                 * The secondary CPUs poll the spin table for a non-zero
                 * entry address and call "wfe" to save power while they
                 * wait. Issue "sev" here to wake them so they check the
                 * spin table again (see the slave_cpu loop in lowlevel.S).
                 */
                asm volatile("sev");
        } else {
                /* Use PSCI to kick the core */
                printf("Kicking cpu core #%d to address 0x%llx\n",
                       nr, boot_addr);
                ret = invoke_psci_fn(PSCI_0_2_FN64_CPU_ON, nr, boot_addr, 0);
                if (ret)
                        return -1;
        }

        return 0;
}
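
/*
 * For reference, the consumer side of this protocol (the slave_cpu loop
 * in lowlevel.S) behaves roughly like the following C-like sketch; the
 * real code is assembly and additionally handles the architecture switch
 * recorded by update_os_arch_secondary_cores():
 *
 *   do {
 *           wfe();
 *           addr = entry[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX];
 *   } while (!addr);
 *   branch_to(addr);
 */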