// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012-2015 Panasonic Corporation
 * Copyright (C) 2015-2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/global_data.h>

#include "init.h"
#include "sg-regs.h"
#include "soc-info.h"
DECLARE_GLOBAL_DATA_PTR;

/* One physical DRAM channel: start address and length in bytes. */
struct uniphier_dram_map {
	unsigned long base;
	unsigned long size;
};
27 static int uniphier_memconf_decode(struct uniphier_dram_map *dram_map,
28 unsigned long sparse_ch1_base, bool have_ch2)
33 val = readl(sg_base + SG_MEMCONF);
36 dram_map[0].base = 0x80000000;
38 switch (val & SG_MEMCONF_CH0_SZ_MASK) {
39 case SG_MEMCONF_CH0_SZ_64M:
42 case SG_MEMCONF_CH0_SZ_128M:
45 case SG_MEMCONF_CH0_SZ_256M:
48 case SG_MEMCONF_CH0_SZ_512M:
51 case SG_MEMCONF_CH0_SZ_1G:
55 pr_err("error: invalid value is set to MEMCONF ch0 size\n");
59 if ((val & SG_MEMCONF_CH0_NUM_MASK) == SG_MEMCONF_CH0_NUM_2)
62 dram_map[0].size = size;
65 dram_map[1].base = dram_map[0].base + size;
67 if (val & SG_MEMCONF_SPARSEMEM) {
68 if (dram_map[1].base > sparse_ch1_base) {
69 pr_warn("Sparse mem is enabled, but ch0 and ch1 overlap\n");
70 pr_warn("Only ch0 is available\n");
75 dram_map[1].base = sparse_ch1_base;
78 switch (val & SG_MEMCONF_CH1_SZ_MASK) {
79 case SG_MEMCONF_CH1_SZ_64M:
82 case SG_MEMCONF_CH1_SZ_128M:
85 case SG_MEMCONF_CH1_SZ_256M:
88 case SG_MEMCONF_CH1_SZ_512M:
91 case SG_MEMCONF_CH1_SZ_1G:
95 pr_err("error: invalid value is set to MEMCONF ch1 size\n");
99 if ((val & SG_MEMCONF_CH1_NUM_MASK) == SG_MEMCONF_CH1_NUM_2)
102 dram_map[1].size = size;
104 if (!have_ch2 || val & SG_MEMCONF_CH2_DISABLE)
108 dram_map[2].base = dram_map[1].base + size;
110 switch (val & SG_MEMCONF_CH2_SZ_MASK) {
111 case SG_MEMCONF_CH2_SZ_64M:
114 case SG_MEMCONF_CH2_SZ_128M:
117 case SG_MEMCONF_CH2_SZ_256M:
120 case SG_MEMCONF_CH2_SZ_512M:
123 case SG_MEMCONF_CH2_SZ_1G:
127 pr_err("error: invalid value is set to MEMCONF ch2 size\n");
131 if ((val & SG_MEMCONF_CH2_NUM_MASK) == SG_MEMCONF_CH2_NUM_2)
134 dram_map[2].size = size;
139 static int uniphier_ld4_dram_map_get(struct uniphier_dram_map dram_map[])
141 return uniphier_memconf_decode(dram_map, 0xc0000000, false);
144 static int uniphier_pro4_dram_map_get(struct uniphier_dram_map dram_map[])
146 return uniphier_memconf_decode(dram_map, 0xa0000000, false);
149 static int uniphier_pxs2_dram_map_get(struct uniphier_dram_map dram_map[])
151 return uniphier_memconf_decode(dram_map, 0xc0000000, true);
154 struct uniphier_dram_init_data {
156 int (*dram_map_get)(struct uniphier_dram_map dram_map[]);
159 static const struct uniphier_dram_init_data uniphier_dram_init_data[] = {
161 .soc_id = UNIPHIER_LD4_ID,
162 .dram_map_get = uniphier_ld4_dram_map_get,
165 .soc_id = UNIPHIER_PRO4_ID,
166 .dram_map_get = uniphier_pro4_dram_map_get,
169 .soc_id = UNIPHIER_SLD8_ID,
170 .dram_map_get = uniphier_ld4_dram_map_get,
173 .soc_id = UNIPHIER_PRO5_ID,
174 .dram_map_get = uniphier_ld4_dram_map_get,
177 .soc_id = UNIPHIER_PXS2_ID,
178 .dram_map_get = uniphier_pxs2_dram_map_get,
181 .soc_id = UNIPHIER_LD6B_ID,
182 .dram_map_get = uniphier_pxs2_dram_map_get,
185 .soc_id = UNIPHIER_LD11_ID,
186 .dram_map_get = uniphier_ld4_dram_map_get,
189 .soc_id = UNIPHIER_LD20_ID,
190 .dram_map_get = uniphier_pxs2_dram_map_get,
193 .soc_id = UNIPHIER_PXS3_ID,
194 .dram_map_get = uniphier_pxs2_dram_map_get,
197 UNIPHIER_DEFINE_SOCDATA_FUNC(uniphier_get_dram_init_data,
198 uniphier_dram_init_data)
200 static int uniphier_dram_map_get(struct uniphier_dram_map *dram_map)
202 const struct uniphier_dram_init_data *data;
204 data = uniphier_get_dram_init_data();
206 pr_err("unsupported SoC\n");
210 return data->dram_map_get(dram_map);
215 struct uniphier_dram_map dram_map[3] = {};
216 bool valid_bank_found = false;
217 unsigned long prev_top;
222 ret = uniphier_dram_map_get(dram_map);
226 for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
227 unsigned long max_size;
229 if (!dram_map[i].size)
233 * U-Boot relocates itself to the tail of the memory region,
234 * but it does not expect sparse memory. We use the first
235 * contiguous chunk here.
237 if (valid_bank_found && prev_top < dram_map[i].base)
241 * Do not use memory that exceeds 32bit address range. U-Boot
242 * relocates itself to the end of the effectively available RAM.
243 * This could be a problem for DMA engines that do not support
244 * 64bit address (SDMA of SDHCI, UniPhier AV-ether, etc.)
246 if (dram_map[i].base >= 1ULL << 32)
249 max_size = (1ULL << 32) - dram_map[i].base;
251 if (dram_map[i].size > max_size) {
252 gd->ram_size += max_size;
256 gd->ram_size += dram_map[i].size;
258 if (!valid_bank_found)
259 gd->ram_base = dram_map[i].base;
261 prev_top = dram_map[i].base + dram_map[i].size;
262 valid_bank_found = true;
266 * LD20 uses the last 64 byte for each channel for dynamic
269 if (uniphier_get_soc_id() == UNIPHIER_LD20_ID)
275 int dram_init_banksize(void)
277 struct uniphier_dram_map dram_map[3] = {};
278 unsigned long base, top;
279 bool valid_bank_found = false;
282 ret = uniphier_dram_map_get(dram_map);
286 for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
287 if (i < ARRAY_SIZE(gd->bd->bi_dram)) {
288 gd->bd->bi_dram[i].start = dram_map[i].base;
289 gd->bd->bi_dram[i].size = dram_map[i].size;
292 if (!dram_map[i].size)
295 if (!valid_bank_found)
296 base = dram_map[i].base;
297 top = dram_map[i].base + dram_map[i].size;
298 valid_bank_found = true;
301 if (!valid_bank_found)
304 /* map all the DRAM regions */
305 uniphier_mem_map_init(base, top - base);