- Move cache initialization to C from assembly.
- Move the anomaly workaround for writing [ID]MEM_CONTROL to assembly, so
  that we don't have to mess around with .align directives in C source.
- Fix a bug where bfin_write_DMEM_CONTROL would write to IMEM_CONTROL
  instead of DMEM_CONTROL.
- Break out CPLB-related code from kernel/setup.c into its own file.
- Don't define variables in header files, only declare them.
Signed-off-by: Bernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
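
For reference, the instruction-cache half of the new C-side initialization
(a condensed sketch of the cacheinit.c added by this patch, not new code)
looks like the following: the initial CPLB entries are programmed from
icplb_table, and the final IMEM_CONTROL update goes through the assembly
helper that carries the anomaly 05000125 workaround. bfin_dcache_init()
is analogous for the data side.

    void bfin_icache_init(void)
    {
            unsigned long *table = icplb_table; /* filled by generate_cpl_tables() */
            unsigned long ctrl;
            int i;

            /* Program ICPLB_ADDRx/ICPLB_DATAx pairs until the -1 terminator. */
            for (i = 0; i < MAX_CPLBS; i++) {
                    unsigned long addr = *table++;
                    unsigned long data = *table++;
                    if (addr == (unsigned long)-1)
                            break;
                    bfin_write32(ICPLB_ADDR0 + i * 4, addr);
                    bfin_write32(ICPLB_DATA0 + i * 4, data);
            }

            /* Enable the i-cache and ICPLBs; the MMR write is done by the
             * assembly bfin_write_IMEM_CONTROL() so the .align 8 required
             * for anomaly 05000125 stays out of the C source. */
            ctrl = bfin_read_IMEM_CONTROL();
            ctrl |= IMC | ENICPLB;
            bfin_write_IMEM_CONTROL(ctrl);
    }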
obj-y := \
entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
sys_bfin.o time.o traps.o irqchip.o dma-mapping.o flat.o \
- fixed_code.o
+ fixed_code.o cplbinit.o cacheinit.o
obj-$(CONFIG_BF53x) += bfin_gpio.o
obj-$(CONFIG_BF561) += bfin_gpio.o
--- /dev/null
+/*
+ * Copyright 2004-2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/blackfin.h>
+#include <asm/cplbinit.h>
+
+#if defined(CONFIG_BLKFIN_CACHE)
+void bfin_icache_init(void)
+{
+ unsigned long *table = icplb_table;
+ unsigned long ctrl;
+ int i;
+
+ for (i = 0; i < MAX_CPLBS; i++) {
+ unsigned long addr = *table++;
+ unsigned long data = *table++;
+ if (addr == (unsigned long)-1)
+ break;
+ bfin_write32(ICPLB_ADDR0 + i * 4, addr);
+ bfin_write32(ICPLB_DATA0 + i * 4, data);
+ }
+ ctrl = bfin_read_IMEM_CONTROL();
+ ctrl |= IMC | ENICPLB;
+ bfin_write_IMEM_CONTROL(ctrl);
+}
+#endif
+
+#if defined(CONFIG_BLKFIN_DCACHE)
+void bfin_dcache_init(void)
+{
+ unsigned long *table = dcplb_table;
+ unsigned long ctrl;
+ int i;
+
+ for (i = 0; i < MAX_CPLBS; i++) {
+ unsigned long addr = *table++;
+ unsigned long data = *table++;
+ if (addr == (unsigned long)-1)
+ break;
+ bfin_write32(DCPLB_ADDR0 + i * 4, addr);
+ bfin_write32(DCPLB_DATA0 + i * 4, data);
+ }
+ ctrl = bfin_read_DMEM_CONTROL();
+ ctrl |= DMEM_CNTR;
+ bfin_write_DMEM_CONTROL(ctrl);
+}
+#endif
--- /dev/null
+/*
+ * Blackfin CPLB initialization
+ *
+ * Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/cplbinit.h>
+
+u_long icplb_table[MAX_CPLBS+1];
+u_long dcplb_table[MAX_CPLBS+1];
+
+#ifdef CONFIG_CPLB_SWITCH_TAB_L1
+u_long ipdt_table[MAX_SWITCH_I_CPLBS+1]__attribute__((l1_data));
+u_long dpdt_table[MAX_SWITCH_D_CPLBS+1]__attribute__((l1_data));
+
+#ifdef CONFIG_CPLB_INFO
+u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS]__attribute__((l1_data));
+u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS]__attribute__((l1_data));
+#endif /* CONFIG_CPLB_INFO */
+
+#else
+
+u_long ipdt_table[MAX_SWITCH_I_CPLBS+1];
+u_long dpdt_table[MAX_SWITCH_D_CPLBS+1];
+
+#ifdef CONFIG_CPLB_INFO
+u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS];
+u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS];
+#endif /* CONFIG_CPLB_INFO */
+
+#endif /*CONFIG_CPLB_SWITCH_TAB_L1*/
+
+struct s_cplb {
+ struct cplb_tab init_i;
+ struct cplb_tab init_d;
+ struct cplb_tab switch_i;
+ struct cplb_tab switch_d;
+};
+
+#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
+static struct cplb_desc cplb_data[] = {
+ {
+ .start = 0,
+ .end = SIZE_1K,
+ .psize = SIZE_1K,
+ .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
+ .i_conf = SDRAM_OOPS,
+ .d_conf = SDRAM_OOPS,
+#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
+ .valid = 1,
+#else
+ .valid = 0,
+#endif
+ .name = "ZERO Pointer Saveguard",
+ },
+ {
+ .start = L1_CODE_START,
+ .end = L1_CODE_START + L1_CODE_LENGTH,
+ .psize = SIZE_4M,
+ .attr = INITIAL_T | SWITCH_T | I_CPLB,
+ .i_conf = L1_IMEMORY,
+ .d_conf = 0,
+ .valid = 1,
+ .name = "L1 I-Memory",
+ },
+ {
+ .start = L1_DATA_A_START,
+ .end = L1_DATA_B_START + L1_DATA_B_LENGTH,
+ .psize = SIZE_4M,
+ .attr = INITIAL_T | SWITCH_T | D_CPLB,
+ .i_conf = 0,
+ .d_conf = L1_DMEMORY,
+#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
+ .valid = 1,
+#else
+ .valid = 0,
+#endif
+ .name = "L1 D-Memory",
+ },
+ {
+ .start = 0,
+ .end = 0, /* dynamic */
+ .psize = 0,
+ .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
+ .i_conf = SDRAM_IGENERIC,
+ .d_conf = SDRAM_DGENERIC,
+ .valid = 1,
+ .name = "SDRAM Kernel",
+ },
+ {
+ .start = 0, /* dynamic */
+ .end = 0, /* dynamic */
+ .psize = 0,
+ .attr = INITIAL_T | SWITCH_T | D_CPLB,
+ .i_conf = SDRAM_IGENERIC,
+ .d_conf = SDRAM_DNON_CHBL,
+ .valid = 1,
+ .name = "SDRAM RAM MTD",
+ },
+ {
+ .start = 0, /* dynamic */
+ .end = 0, /* dynamic */
+ .psize = SIZE_1M,
+ .attr = INITIAL_T | SWITCH_T | D_CPLB,
+ .d_conf = SDRAM_DNON_CHBL,
+ .valid = 1,
+ .name = "SDRAM Uncached DMA ZONE",
+ },
+ {
+ .start = 0, /* dynamic */
+ .end = 0, /* dynamic */
+ .psize = 0,
+ .attr = SWITCH_T | D_CPLB,
+ .i_conf = 0, /* dynamic */
+ .d_conf = 0, /* dynamic */
+ .valid = 1,
+ .name = "SDRAM Reserved Memory",
+ },
+ {
+ .start = ASYNC_BANK0_BASE,
+ .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
+ .psize = 0,
+ .attr = SWITCH_T | D_CPLB,
+ .d_conf = SDRAM_EBIU,
+ .valid = 1,
+ .name = "ASYNC Memory",
+ },
+ {
+#if defined(CONFIG_BF561)
+ .start = L2_SRAM,
+ .end = L2_SRAM_END,
+ .psize = SIZE_1M,
+ .attr = SWITCH_T | D_CPLB,
+ .i_conf = L2_MEMORY,
+ .d_conf = L2_MEMORY,
+ .valid = 1,
+#else
+ .valid = 0,
+#endif
+ .name = "L2 Memory",
+ }
+};
+
+static u16 __init lock_kernel_check(u32 start, u32 end)
+{
+ if ((start <= (u32) _stext && end >= (u32) _end)
+ || (start >= (u32) _stext && end <= (u32) _end))
+ return IN_KERNEL;
+ return 0;
+}
+
+static unsigned short __init
+fill_cplbtab(struct cplb_tab *table,
+ unsigned long start, unsigned long end,
+ unsigned long block_size, unsigned long cplb_data)
+{
+ int i;
+
+ switch (block_size) {
+ case SIZE_4M:
+ i = 3;
+ break;
+ case SIZE_1M:
+ i = 2;
+ break;
+ case SIZE_4K:
+ i = 1;
+ break;
+ case SIZE_1K:
+ default:
+ i = 0;
+ break;
+ }
+
+ cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);
+
+ while ((start < end) && (table->pos < table->size)) {
+
+ table->tab[table->pos++] = start;
+
+ if (lock_kernel_check(start, start + block_size) == IN_KERNEL)
+ table->tab[table->pos++] =
+ cplb_data | CPLB_LOCK | CPLB_DIRTY;
+ else
+ table->tab[table->pos++] = cplb_data;
+
+ start += block_size;
+ }
+ return 0;
+}
+
+static unsigned short __init
+close_cplbtab(struct cplb_tab *table)
+{
+
+ while (table->pos < table->size) {
+
+ table->tab[table->pos++] = 0;
+ table->tab[table->pos++] = 0; /* !CPLB_VALID */
+ }
+ return 0;
+}
+
+/* helper function */
+static void __fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
+{
+ if (cplb_data[i].psize) {
+ fill_cplbtab(t,
+ cplb_data[i].start,
+ cplb_data[i].end,
+ cplb_data[i].psize,
+ cplb_data[i].i_conf);
+ } else {
+#if (defined(CONFIG_BLKFIN_CACHE) && defined(ANOMALY_05000263))
+ if (i == SDRAM_KERN) {
+ fill_cplbtab(t,
+ cplb_data[i].start,
+ cplb_data[i].end,
+ SIZE_4M,
+ cplb_data[i].i_conf);
+ } else
+#endif
+ {
+ fill_cplbtab(t,
+ cplb_data[i].start,
+ a_start,
+ SIZE_1M,
+ cplb_data[i].i_conf);
+ fill_cplbtab(t,
+ a_start,
+ a_end,
+ SIZE_4M,
+ cplb_data[i].i_conf);
+ fill_cplbtab(t, a_end,
+ cplb_data[i].end,
+ SIZE_1M,
+ cplb_data[i].i_conf);
+ }
+ }
+}
+
+static void __fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
+{
+ if (cplb_data[i].psize) {
+ fill_cplbtab(t,
+ cplb_data[i].start,
+ cplb_data[i].end,
+ cplb_data[i].psize,
+ cplb_data[i].d_conf);
+ } else {
+ fill_cplbtab(t,
+ cplb_data[i].start,
+ a_start, SIZE_1M,
+ cplb_data[i].d_conf);
+ fill_cplbtab(t, a_start,
+ a_end, SIZE_4M,
+ cplb_data[i].d_conf);
+ fill_cplbtab(t, a_end,
+ cplb_data[i].end,
+ SIZE_1M,
+ cplb_data[i].d_conf);
+ }
+}
+
+void __init generate_cpl_tables(void)
+{
+
+ u16 i, j, process;
+ u32 a_start, a_end, as, ae, as_1m;
+
+ struct cplb_tab *t_i = NULL;
+ struct cplb_tab *t_d = NULL;
+ struct s_cplb cplb;
+
+ cplb.init_i.size = MAX_CPLBS;
+ cplb.init_d.size = MAX_CPLBS;
+ cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
+ cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
+
+ cplb.init_i.pos = 0;
+ cplb.init_d.pos = 0;
+ cplb.switch_i.pos = 0;
+ cplb.switch_d.pos = 0;
+
+ cplb.init_i.tab = icplb_table;
+ cplb.init_d.tab = dcplb_table;
+ cplb.switch_i.tab = ipdt_table;
+ cplb.switch_d.tab = dpdt_table;
+
+ cplb_data[SDRAM_KERN].end = memory_end;
+
+#ifdef CONFIG_MTD_UCLINUX
+ cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
+ cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
+ cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
+# if defined(CONFIG_ROMFS_FS)
+ cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
+
+ /*
+ * The ROMFS_FS size is often not a multiple of 1MB.
+ * This can cause multiple CPLB sets covering the same memory area.
+ * This will then cause multiple CPLB hit exceptions.
+ * Workaround: we ensure a contiguous memory area by extending the kernel
+ * memory section over the mtd section.
+ * For ROMFS_FS, memory must be covered with ICPLBs anyway,
+ * so there is no difference between the kernel and mtd memory setup.
+ */
+
+ cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;
+ cplb_data[SDRAM_RAM_MTD].valid = 0;
+
+# endif
+#else
+ cplb_data[SDRAM_RAM_MTD].valid = 0;
+#endif
+
+ cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
+ cplb_data[SDRAM_DMAZ].end = _ramend;
+
+ cplb_data[RES_MEM].start = _ramend;
+ cplb_data[RES_MEM].end = physical_mem_end;
+
+ if (reserved_mem_dcache_on)
+ cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
+ else
+ cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
+
+ if (reserved_mem_icache_on)
+ cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
+ else
+ cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
+
+ for (i = ZERO_P; i <= L2_MEM; i++) {
+ if (!cplb_data[i].valid)
+ continue;
+
+ as_1m = cplb_data[i].start % SIZE_1M;
+
+ /*
+ * We need to make sure all sections are properly 1M aligned.
+ * However, between kernel memory and the kernel mtd section,
+ * depending on the rootfs size, there can be overlapping
+ * memory areas.
+ */
+
+ if (as_1m && i != L1I_MEM && i != L1D_MEM) {
+#ifdef CONFIG_MTD_UCLINUX
+ if (i == SDRAM_RAM_MTD) {
+ if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start)
+ cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M;
+ else
+ cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
+ } else
+#endif
+ printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n",
+ cplb_data[i].name, cplb_data[i].start);
+ }
+
+ as = cplb_data[i].start % SIZE_4M;
+ ae = cplb_data[i].end % SIZE_4M;
+
+ if (as)
+ a_start = cplb_data[i].start + (SIZE_4M - (as));
+ else
+ a_start = cplb_data[i].start;
+
+ a_end = cplb_data[i].end - ae;
+
+ for (j = INITIAL_T; j <= SWITCH_T; j++) {
+
+ switch (j) {
+ case INITIAL_T:
+ if (cplb_data[i].attr & INITIAL_T) {
+ t_i = &cplb.init_i;
+ t_d = &cplb.init_d;
+ process = 1;
+ } else
+ process = 0;
+ break;
+ case SWITCH_T:
+ if (cplb_data[i].attr & SWITCH_T) {
+ t_i = &cplb.switch_i;
+ t_d = &cplb.switch_d;
+ process = 1;
+ } else
+ process = 0;
+ break;
+ default:
+ process = 0;
+ break;
+ }
+
+ if (!process)
+ continue;
+ if (cplb_data[i].attr & I_CPLB)
+ __fill_code_cplbtab(t_i, i, a_start, a_end);
+
+ if (cplb_data[i].attr & D_CPLB)
+ __fill_data_cplbtab(t_d, i, a_start, a_end);
+ }
+ }
+
+/* close tables */
+
+ close_cplbtab(&cplb.init_i);
+ close_cplbtab(&cplb.init_d);
+
+ cplb.init_i.tab[cplb.init_i.pos] = -1;
+ cplb.init_d.tab[cplb.init_d.pos] = -1;
+ cplb.switch_i.tab[cplb.switch_i.pos] = -1;
+ cplb.switch_d.tab[cplb.switch_d.pos] = -1;
+
+}
+
+#endif
+
char __initdata command_line[COMMAND_LINE_SIZE];
-#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
-static void generate_cpl_tables(void);
-#endif
-
void __init bf53x_cache_init(void)
{
#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
_bfin_swrst = bfin_read_SWRST();
#endif
- bf53x_cache_init();
-
printk(KERN_INFO "Hardware Trace Enabled\n");
bfin_write_TBUFCTL(0x03);
!= ATOMIC_AND32 - FIXED_CODE_START);
BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
!= ATOMIC_XOR32 - FIXED_CODE_START);
+
+ bf53x_cache_init();
}
static int __init topology_init(void)
subsys_initcall(topology_init);
-#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
-static u16 __init lock_kernel_check(u32 start, u32 end)
-{
- if ((start <= (u32) _stext && end >= (u32) _end)
- || (start >= (u32) _stext && end <= (u32) _end))
- return IN_KERNEL;
- return 0;
-}
-
-static unsigned short __init
-fill_cplbtab(struct cplb_tab *table,
- unsigned long start, unsigned long end,
- unsigned long block_size, unsigned long cplb_data)
-{
- int i;
-
- switch (block_size) {
- case SIZE_4M:
- i = 3;
- break;
- case SIZE_1M:
- i = 2;
- break;
- case SIZE_4K:
- i = 1;
- break;
- case SIZE_1K:
- default:
- i = 0;
- break;
- }
-
- cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);
-
- while ((start < end) && (table->pos < table->size)) {
-
- table->tab[table->pos++] = start;
-
- if (lock_kernel_check(start, start + block_size) == IN_KERNEL)
- table->tab[table->pos++] =
- cplb_data | CPLB_LOCK | CPLB_DIRTY;
- else
- table->tab[table->pos++] = cplb_data;
-
- start += block_size;
- }
- return 0;
-}
-
-static unsigned short __init
-close_cplbtab(struct cplb_tab *table)
-{
-
- while (table->pos < table->size) {
-
- table->tab[table->pos++] = 0;
- table->tab[table->pos++] = 0; /* !CPLB_VALID */
- }
- return 0;
-}
-
-/* helper function */
-static void __fill_code_cplbtab(struct cplb_tab *t, int i,
- u32 a_start, u32 a_end)
-{
- if (cplb_data[i].psize) {
- fill_cplbtab(t,
- cplb_data[i].start,
- cplb_data[i].end,
- cplb_data[i].psize,
- cplb_data[i].i_conf);
- } else {
-#if (defined(CONFIG_BLKFIN_CACHE) && defined(ANOMALY_05000263))
- if (i == SDRAM_KERN) {
- fill_cplbtab(t,
- cplb_data[i].start,
- cplb_data[i].end,
- SIZE_4M,
- cplb_data[i].i_conf);
- } else
-#endif
- {
- fill_cplbtab(t,
- cplb_data[i].start,
- a_start,
- SIZE_1M,
- cplb_data[i].i_conf);
- fill_cplbtab(t,
- a_start,
- a_end,
- SIZE_4M,
- cplb_data[i].i_conf);
- fill_cplbtab(t, a_end,
- cplb_data[i].end,
- SIZE_1M,
- cplb_data[i].i_conf);
- }
- }
-}
-
-static void __fill_data_cplbtab(struct cplb_tab *t, int i,
- u32 a_start, u32 a_end)
-{
- if (cplb_data[i].psize) {
- fill_cplbtab(t,
- cplb_data[i].start,
- cplb_data[i].end,
- cplb_data[i].psize,
- cplb_data[i].d_conf);
- } else {
- fill_cplbtab(t,
- cplb_data[i].start,
- a_start, SIZE_1M,
- cplb_data[i].d_conf);
- fill_cplbtab(t, a_start,
- a_end, SIZE_4M,
- cplb_data[i].d_conf);
- fill_cplbtab(t, a_end,
- cplb_data[i].end,
- SIZE_1M,
- cplb_data[i].d_conf);
- }
-}
-static void __init generate_cpl_tables(void)
-{
-
- u16 i, j, process;
- u32 a_start, a_end, as, ae, as_1m;
-
- struct cplb_tab *t_i = NULL;
- struct cplb_tab *t_d = NULL;
- struct s_cplb cplb;
-
- cplb.init_i.size = MAX_CPLBS;
- cplb.init_d.size = MAX_CPLBS;
- cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
- cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
-
- cplb.init_i.pos = 0;
- cplb.init_d.pos = 0;
- cplb.switch_i.pos = 0;
- cplb.switch_d.pos = 0;
-
- cplb.init_i.tab = icplb_table;
- cplb.init_d.tab = dcplb_table;
- cplb.switch_i.tab = ipdt_table;
- cplb.switch_d.tab = dpdt_table;
-
- cplb_data[SDRAM_KERN].end = memory_end;
-
-#ifdef CONFIG_MTD_UCLINUX
- cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
- cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
- cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
-# if defined(CONFIG_ROMFS_FS)
- cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
-
- /*
- * The ROMFS_FS size is often not multiple of 1MB.
- * This can cause multiple CPLB sets covering the same memory area.
- * This will then cause multiple CPLB hit exceptions.
- * Workaround: We ensure a contiguous memory area by extending the kernel
- * memory section over the mtd section.
- * For ROMFS_FS memory must be covered with ICPLBs anyways.
- * So there is no difference between kernel and mtd memory setup.
- */
-
- cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;;
- cplb_data[SDRAM_RAM_MTD].valid = 0;
-
-# endif
-#else
- cplb_data[SDRAM_RAM_MTD].valid = 0;
-#endif
-
- cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
- cplb_data[SDRAM_DMAZ].end = _ramend;
-
- cplb_data[RES_MEM].start = _ramend;
- cplb_data[RES_MEM].end = physical_mem_end;
-
- if (reserved_mem_dcache_on)
- cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
- else
- cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
-
- if (reserved_mem_icache_on)
- cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
- else
- cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
-
- for (i = ZERO_P; i <= L2_MEM; i++) {
- if (!cplb_data[i].valid)
- continue;
-
- as_1m = cplb_data[i].start % SIZE_1M;
-
- /*
- * We need to make sure all sections are properly 1M aligned
- * However between Kernel Memory and the Kernel mtd section,
- * depending on the rootfs size, there can be overlapping
- * memory areas.
- */
-
- if (as_1m && i != L1I_MEM && i != L1D_MEM) {
-#ifdef CONFIG_MTD_UCLINUX
- if (i == SDRAM_RAM_MTD) {
- if ((cplb_data[SDRAM_KERN].end + 1) >
- cplb_data[SDRAM_RAM_MTD].start)
- cplb_data[SDRAM_RAM_MTD].start =
- (cplb_data[i].start &
- (-2*SIZE_1M)) + SIZE_1M;
- else
- cplb_data[SDRAM_RAM_MTD].start =
- (cplb_data[i].start &
- (-2*SIZE_1M));
- } else
-#endif
- printk(KERN_WARNING
- "Unaligned Start of %s at 0x%X\n",
- cplb_data[i].name, cplb_data[i].start);
- }
-
- as = cplb_data[i].start % SIZE_4M;
- ae = cplb_data[i].end % SIZE_4M;
-
- if (as)
- a_start = cplb_data[i].start + (SIZE_4M - (as));
- else
- a_start = cplb_data[i].start;
-
- a_end = cplb_data[i].end - ae;
-
- for (j = INITIAL_T; j <= SWITCH_T; j++) {
-
- switch (j) {
- case INITIAL_T:
- if (cplb_data[i].attr & INITIAL_T) {
- t_i = &cplb.init_i;
- t_d = &cplb.init_d;
- process = 1;
- } else
- process = 0;
- break;
- case SWITCH_T:
- if (cplb_data[i].attr & SWITCH_T) {
- t_i = &cplb.switch_i;
- t_d = &cplb.switch_d;
- process = 1;
- } else
- process = 0;
- break;
- default:
- process = 0;
- break;
- }
-
- if (!process)
- continue;
- if (cplb_data[i].attr & I_CPLB)
- __fill_code_cplbtab(t_i, i, a_start, a_end);
-
- if (cplb_data[i].attr & D_CPLB)
- __fill_data_cplbtab(t_d, i, a_start, a_end);
- }
- }
-
-/* close tables */
-
- close_cplbtab(&cplb.init_i);
- close_cplbtab(&cplb.init_d);
-
- cplb.init_i.tab[cplb.init_i.pos] = -1;
- cplb.init_d.tab[cplb.init_d.pos] = -1;
- cplb.switch_i.tab[cplb.switch_i.pos] = -1;
- cplb.switch_d.tab[cplb.switch_d.pos] = -1;
-
-}
-
-#endif
-
static u_long get_vco(void)
{
u_long msel;
.text
+#ifdef ANOMALY_05000125
#if defined(CONFIG_BLKFIN_CACHE)
-ENTRY(_bfin_icache_init)
+ENTRY(_bfin_write_IMEM_CONTROL)
- /* Initialize Instruction CPLBS */
-
- I0.L = (ICPLB_ADDR0 & 0xFFFF);
- I0.H = (ICPLB_ADDR0 >> 16);
-
- I1.L = (ICPLB_DATA0 & 0xFFFF);
- I1.H = (ICPLB_DATA0 >> 16);
-
- I2.L = _icplb_table;
- I2.H = _icplb_table;
-
- r1 = -1; /* end point comparison */
- r3 = 15; /* max counter */
-
-/* read entries from table */
-
-.Lread_iaddr:
- R0 = [I2++];
- CC = R0 == R1;
- IF CC JUMP .Lidone;
- [I0++] = R0;
-
-.Lread_idata:
- R2 = [I2++];
- [I1++] = R2;
- R3 = R3 + R1;
- CC = R3 == R1;
- IF !CC JUMP .Lread_iaddr;
-
-.Lidone:
/* Enable Instruction Cache */
P0.l = (IMEM_CONTROL & 0xFFFF);
P0.h = (IMEM_CONTROL >> 16);
- R1 = [P0];
- R0 = (IMC | ENICPLB);
- R0 = R0 | R1;
/* Anomaly 05000125 */
- CLI R2;
+ CLI R1;
SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
.align 8;
[P0] = R0;
SSYNC;
- STI R2;
+ STI R1;
RTS;
-ENDPROC(_bfin_icache_init)
+ENDPROC(_bfin_write_IMEM_CONTROL)
#endif
#if defined(CONFIG_BLKFIN_DCACHE)
-ENTRY(_bfin_dcache_init)
-
- /* Initialize Data CPLBS */
-
- I0.L = (DCPLB_ADDR0 & 0xFFFF);
- I0.H = (DCPLB_ADDR0 >> 16);
-
- I1.L = (DCPLB_DATA0 & 0xFFFF);
- I1.H = (DCPLB_DATA0 >> 16);
-
- I2.L = _dcplb_table;
- I2.H = _dcplb_table;
-
- R1 = -1; /* end point comparison */
- R3 = 15; /* max counter */
-
- /* read entries from table */
-.Lread_daddr:
- R0 = [I2++];
- cc = R0 == R1;
- IF CC JUMP .Lddone;
- [I0++] = R0;
-
-.Lread_ddata:
- R2 = [I2++];
- [I1++] = R2;
- R3 = R3 + R1;
- CC = R3 == R1;
- IF !CC JUMP .Lread_daddr;
-.Lddone:
- P0.L = (DMEM_CONTROL & 0xFFFF);
- P0.H = (DMEM_CONTROL >> 16);
- R1 = [P0];
-
- R0 = DMEM_CNTR;
-
- R0 = R0 | R1;
- /* Anomaly 05000125 */
- CLI R2;
+ENTRY(_bfin_write_DMEM_CONTROL)
+ P0.l = (DMEM_CONTROL & 0xFFFF);
+ P0.h = (DMEM_CONTROL >> 16);
+
+ /* Anomaly 05000125 */
+ CLI R1;
SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
.align 8;
[P0] = R0;
SSYNC;
- STI R2;
+ STI R1;
RTS;
-ENDPROC(_bfin_dcache_init)
+ENDPROC(_bfin_write_DMEM_CONTROL)
+#endif
+
#endif
u16 size;
};
-u_long icplb_table[MAX_CPLBS+1];
-u_long dcplb_table[MAX_CPLBS+1];
+extern u_long icplb_table[MAX_CPLBS+1];
+extern u_long dcplb_table[MAX_CPLBS+1];
/* Till here we are discussing about the static memory management model.
* However, the operating envoronments commonly define more CPLB
*/
#ifdef CONFIG_CPLB_SWITCH_TAB_L1
-u_long ipdt_table[MAX_SWITCH_I_CPLBS+1]__attribute__((l1_data));
-u_long dpdt_table[MAX_SWITCH_D_CPLBS+1]__attribute__((l1_data));
+extern u_long ipdt_table[MAX_SWITCH_I_CPLBS+1]__attribute__((l1_data));
+extern u_long dpdt_table[MAX_SWITCH_D_CPLBS+1]__attribute__((l1_data));
#ifdef CONFIG_CPLB_INFO
-u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS]__attribute__((l1_data));
-u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS]__attribute__((l1_data));
+extern u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS]__attribute__((l1_data));
+extern u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS]__attribute__((l1_data));
#endif /* CONFIG_CPLB_INFO */
#else
-u_long ipdt_table[MAX_SWITCH_I_CPLBS+1];
-u_long dpdt_table[MAX_SWITCH_D_CPLBS+1];
+extern u_long ipdt_table[MAX_SWITCH_I_CPLBS+1];
+extern u_long dpdt_table[MAX_SWITCH_D_CPLBS+1];
#ifdef CONFIG_CPLB_INFO
-u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS];
-u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS];
+extern u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS];
+extern u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS];
#endif /* CONFIG_CPLB_INFO */
#endif /*CONFIG_CPLB_SWITCH_TAB_L1*/
-struct s_cplb {
- struct cplb_tab init_i;
- struct cplb_tab init_d;
- struct cplb_tab switch_i;
- struct cplb_tab switch_d;
-};
+extern unsigned long reserved_mem_dcache_on;
+extern unsigned long reserved_mem_icache_on;
-#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
-static struct cplb_desc cplb_data[] = {
- {
- .start = 0,
- .end = SIZE_1K,
- .psize = SIZE_1K,
- .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
- .i_conf = SDRAM_OOPS,
- .d_conf = SDRAM_OOPS,
-#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
- .valid = 1,
-#else
- .valid = 0,
-#endif
- .name = "ZERO Pointer Saveguard",
- },
- {
- .start = L1_CODE_START,
- .end = L1_CODE_START + L1_CODE_LENGTH,
- .psize = SIZE_4M,
- .attr = INITIAL_T | SWITCH_T | I_CPLB,
- .i_conf = L1_IMEMORY,
- .d_conf = 0,
- .valid = 1,
- .name = "L1 I-Memory",
- },
- {
- .start = L1_DATA_A_START,
- .end = L1_DATA_B_START + L1_DATA_B_LENGTH,
- .psize = SIZE_4M,
- .attr = INITIAL_T | SWITCH_T | D_CPLB,
- .i_conf = 0,
- .d_conf = L1_DMEMORY,
-#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
- .valid = 1,
-#else
- .valid = 0,
-#endif
- .name = "L1 D-Memory",
- },
- {
- .start = 0,
- .end = 0, /* dynamic */
- .psize = 0,
- .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
- .i_conf = SDRAM_IGENERIC,
- .d_conf = SDRAM_DGENERIC,
- .valid = 1,
- .name = "SDRAM Kernel",
- },
- {
- .start = 0, /* dynamic */
- .end = 0, /* dynamic */
- .psize = 0,
- .attr = INITIAL_T | SWITCH_T | D_CPLB,
- .i_conf = SDRAM_IGENERIC,
- .d_conf = SDRAM_DNON_CHBL,
- .valid = 1,
- .name = "SDRAM RAM MTD",
- },
- {
- .start = 0, /* dynamic */
- .end = 0, /* dynamic */
- .psize = SIZE_1M,
- .attr = INITIAL_T | SWITCH_T | D_CPLB,
- .d_conf = SDRAM_DNON_CHBL,
- .valid = 1,//(DMA_UNCACHED_REGION > 0),
- .name = "SDRAM Uncached DMA ZONE",
- },
- {
- .start = 0, /* dynamic */
- .end = 0, /* dynamic */
- .psize = 0,
- .attr = SWITCH_T | D_CPLB,
- .i_conf = 0, /* dynamic */
- .d_conf = 0, /* dynamic */
- .valid = 1,
- .name = "SDRAM Reserved Memory",
- },
- {
- .start = ASYNC_BANK0_BASE,
- .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
- .psize = 0,
- .attr = SWITCH_T | D_CPLB,
- .d_conf = SDRAM_EBIU,
- .valid = 1,
- .name = "ASYNC Memory",
- },
- {
-#if defined(CONFIG_BF561)
- .start = L2_SRAM,
- .end = L2_SRAM_END,
- .psize = SIZE_1M,
- .attr = SWITCH_T | D_CPLB,
- .i_conf = L2_MEMORY,
- .d_conf = L2_MEMORY,
- .valid = 1,
-#else
- .valid = 0,
-#endif
- .name = "L2 Memory",
- }
-};
-#endif
+extern void generate_cpl_tables(void);
#define bfin_write_SRAM_BASE_ADDRESS(val) bfin_write32(SRAM_BASE_ADDRESS,val)
#define bfin_read_DMEM_CONTROL() bfin_read32(DMEM_CONTROL)
#ifdef ANOMALY_05000125
-static __inline__ void bfin_write_DMEM_CONTROL(unsigned int val)
-{
- unsigned long flags, iwr;
-
- local_irq_save(flags);
- __asm__(".align 8\n");
- bfin_write32(IMEM_CONTROL, val);
- __builtin_bfin_ssync();
- local_irq_restore(flags);
-}
+extern void bfin_write_DMEM_CONTROL(unsigned int val);
#else
#define bfin_write_DMEM_CONTROL(val) bfin_write32(DMEM_CONTROL,val)
#endif
*/
#define bfin_read_IMEM_CONTROL() bfin_read32(IMEM_CONTROL)
#ifdef ANOMALY_05000125
-static __inline__ void bfin_write_IMEM_CONTROL(unsigned int val)
-{
- unsigned long flags, iwr;
-
- local_irq_save(flags);
- __asm__(".align 8\n");
- bfin_write32(IMEM_CONTROL, val);
- __builtin_bfin_ssync();
- local_irq_restore(flags);
-
-}
+extern void bfin_write_IMEM_CONTROL(unsigned int val);
#else
#define bfin_write_IMEM_CONTROL(val) bfin_write32(IMEM_CONTROL,val)
#endif