arch/arm/mach-uniphier/dram_init.c (platform/kernel/u-boot.git)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012-2015 Panasonic Corporation
 * Copyright (C) 2015-2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <init.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/global_data.h>
#include <asm/u-boot.h>

#include "init.h"
#include "sg-regs.h"
#include "soc-info.h"

DECLARE_GLOBAL_DATA_PTR;

struct uniphier_dram_map {
        unsigned long base;
        unsigned long size;
};

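/*
 * Decode the SG_MEMCONF register into a per-channel DRAM map.
 *
 * @sparse_ch1_base: base address used for ch1 when sparse memory mode is
 *                   enabled (SoC dependent)
 * @have_ch2: true if the SoC may have a third memory channel
 *
 * For example, a ch0 configured as 256MB with SG_MEMCONF_CH0_NUM_2 decodes
 * to a 512MB bank at 0x80000000, and ch1 then starts right above it unless
 * sparse memory mode places it at @sparse_ch1_base.
 */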
static int uniphier_memconf_decode(struct uniphier_dram_map *dram_map,
                                   unsigned long sparse_ch1_base, bool have_ch2)
{
        unsigned long size;
        u32 val;

        val = readl(sg_base + SG_MEMCONF);

        /* set up ch0 */
        dram_map[0].base = 0x80000000;

        switch (val & SG_MEMCONF_CH0_SZ_MASK) {
        case SG_MEMCONF_CH0_SZ_64M:
                size = SZ_64M;
                break;
        case SG_MEMCONF_CH0_SZ_128M:
                size = SZ_128M;
                break;
        case SG_MEMCONF_CH0_SZ_256M:
                size = SZ_256M;
                break;
        case SG_MEMCONF_CH0_SZ_512M:
                size = SZ_512M;
                break;
        case SG_MEMCONF_CH0_SZ_1G:
                size = SZ_1G;
                break;
        default:
                pr_err("error: invalid value set for MEMCONF ch0 size\n");
                return -EINVAL;
        }

        if ((val & SG_MEMCONF_CH0_NUM_MASK) == SG_MEMCONF_CH0_NUM_2)
                size *= 2;

        dram_map[0].size = size;

        /* set up ch1 */
        dram_map[1].base = dram_map[0].base + size;

        if (val & SG_MEMCONF_SPARSEMEM) {
                if (dram_map[1].base > sparse_ch1_base) {
                        pr_warn("Sparse mem is enabled, but ch0 and ch1 overlap\n");
                        pr_warn("Only ch0 is available\n");
                        dram_map[1].base = 0;
                        return 0;
                }

                dram_map[1].base = sparse_ch1_base;
        }

        switch (val & SG_MEMCONF_CH1_SZ_MASK) {
        case SG_MEMCONF_CH1_SZ_64M:
                size = SZ_64M;
                break;
        case SG_MEMCONF_CH1_SZ_128M:
                size = SZ_128M;
                break;
        case SG_MEMCONF_CH1_SZ_256M:
                size = SZ_256M;
                break;
        case SG_MEMCONF_CH1_SZ_512M:
                size = SZ_512M;
                break;
        case SG_MEMCONF_CH1_SZ_1G:
                size = SZ_1G;
                break;
        default:
                pr_err("error: invalid value set for MEMCONF ch1 size\n");
                return -EINVAL;
        }

        if ((val & SG_MEMCONF_CH1_NUM_MASK) == SG_MEMCONF_CH1_NUM_2)
                size *= 2;

        dram_map[1].size = size;

        if (!have_ch2 || val & SG_MEMCONF_CH2_DISABLE)
                return 0;

        /* set up ch2 */
        dram_map[2].base = dram_map[1].base + size;

        switch (val & SG_MEMCONF_CH2_SZ_MASK) {
        case SG_MEMCONF_CH2_SZ_64M:
                size = SZ_64M;
                break;
        case SG_MEMCONF_CH2_SZ_128M:
                size = SZ_128M;
                break;
        case SG_MEMCONF_CH2_SZ_256M:
                size = SZ_256M;
                break;
        case SG_MEMCONF_CH2_SZ_512M:
                size = SZ_512M;
                break;
        case SG_MEMCONF_CH2_SZ_1G:
                size = SZ_1G;
                break;
        default:
                pr_err("error: invalid value set for MEMCONF ch2 size\n");
                return -EINVAL;
        }

        if ((val & SG_MEMCONF_CH2_NUM_MASK) == SG_MEMCONF_CH2_NUM_2)
                size *= 2;

        dram_map[2].size = size;

        return 0;
}

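/*
 * Per-SoC wrappers: they differ only in where ch1 is placed when sparse
 * memory mode is enabled and in whether a third channel may be present.
 */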
static int uniphier_ld4_dram_map_get(struct uniphier_dram_map dram_map[])
{
        return uniphier_memconf_decode(dram_map, 0xc0000000, false);
}

static int uniphier_pro4_dram_map_get(struct uniphier_dram_map dram_map[])
{
        return uniphier_memconf_decode(dram_map, 0xa0000000, false);
}

static int uniphier_pxs2_dram_map_get(struct uniphier_dram_map dram_map[])
{
        return uniphier_memconf_decode(dram_map, 0xc0000000, true);
}

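/* Associate each SoC ID with the decoder variant for its DRAM layout. */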
struct uniphier_dram_init_data {
        unsigned int soc_id;
        int (*dram_map_get)(struct uniphier_dram_map dram_map[]);
};

static const struct uniphier_dram_init_data uniphier_dram_init_data[] = {
        {
                .soc_id = UNIPHIER_LD4_ID,
                .dram_map_get = uniphier_ld4_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_PRO4_ID,
                .dram_map_get = uniphier_pro4_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_SLD8_ID,
                .dram_map_get = uniphier_ld4_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_PRO5_ID,
                .dram_map_get = uniphier_ld4_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_PXS2_ID,
                .dram_map_get = uniphier_pxs2_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_LD6B_ID,
                .dram_map_get = uniphier_pxs2_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_LD11_ID,
                .dram_map_get = uniphier_ld4_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_LD20_ID,
                .dram_map_get = uniphier_pxs2_dram_map_get,
        },
        {
                .soc_id = UNIPHIER_PXS3_ID,
                .dram_map_get = uniphier_pxs2_dram_map_get,
        },
};
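/*
 * Generates uniphier_get_dram_init_data(), which returns the entry above
 * matching the running SoC, or NULL if the SoC is not listed.
 */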
UNIPHIER_DEFINE_SOCDATA_FUNC(uniphier_get_dram_init_data,
                             uniphier_dram_init_data)

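/* Look up and run the decoder for the SoC we are running on. */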
static int uniphier_dram_map_get(struct uniphier_dram_map *dram_map)
{
        const struct uniphier_dram_init_data *data;

        data = uniphier_get_dram_init_data();
        if (!data) {
                pr_err("unsupported SoC\n");
                return -ENOTSUPP;
        }

        return data->dram_map_get(dram_map);
}

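/*
 * Report the first contiguous, 32-bit addressable chunk of DRAM via
 * gd->ram_base and gd->ram_size so that U-Boot can relocate itself to
 * its tail.
 */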
int dram_init(void)
{
        struct uniphier_dram_map dram_map[3] = {};
        bool valid_bank_found = false;
        unsigned long prev_top;
        int ret, i;

        gd->ram_size = 0;

        ret = uniphier_dram_map_get(dram_map);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
                unsigned long max_size;

                if (!dram_map[i].size)
                        continue;

                /*
                 * U-Boot relocates itself to the tail of the memory region,
                 * but it does not expect sparse memory.  We use the first
                 * contiguous chunk here.
                 */
                if (valid_bank_found && prev_top < dram_map[i].base)
                        break;

                /*
                 * Do not use memory beyond the 32-bit address range.  U-Boot
                 * relocates itself to the end of the effectively available RAM.
                 * This could be a problem for DMA engines that do not support
                 * 64-bit addresses (SDMA of SDHCI, UniPhier AV-ether, etc.)
                 */
                if (dram_map[i].base >= 1ULL << 32)
                        break;

                max_size = (1ULL << 32) - dram_map[i].base;

                gd->ram_size = min(dram_map[i].size, max_size);

                if (!valid_bank_found)
                        gd->ram_base = dram_map[i].base;

                prev_top = dram_map[i].base + dram_map[i].size;
                valid_bank_found = true;
        }

        /*
         * LD20 uses the last 64 bytes of each channel for dynamic
         * DDR PHY training.
         */
        if (uniphier_get_soc_id() == UNIPHIER_LD20_ID)
                gd->ram_size -= 64;

        return 0;
}

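/*
 * Report all decoded DRAM banks via gd->bd->bi_dram[] and set up the memory
 * map covering the span from the lowest to the highest bank.
 */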
int dram_init_banksize(void)
{
        struct uniphier_dram_map dram_map[3] = {};
        unsigned long base, top;
        bool valid_bank_found = false;
        int ret, i;

        ret = uniphier_dram_map_get(dram_map);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
                if (i < ARRAY_SIZE(gd->bd->bi_dram)) {
                        gd->bd->bi_dram[i].start = dram_map[i].base;
                        gd->bd->bi_dram[i].size = dram_map[i].size;
                }

                if (!dram_map[i].size)
                        continue;

                if (!valid_bank_found)
                        base = dram_map[i].base;
                top = dram_map[i].base + dram_map[i].size;
                valid_bank_found = true;
        }

        if (!valid_bank_found)
                return -EINVAL;

        /* map all the DRAM regions */
        uniphier_mem_map_init(base, top - base);

        return 0;
}