// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
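
/*
 * Return the PTE for @addr if it is mapped and present in the kernel
 * page tables, or 0 otherwise.
 */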
int va_kernel_present(unsigned long addr)
{
        pmd_t *pmd;
        pte_t *ptep, pte;

        pmd = pmd_off_k(addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}
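
/*
 * Walk the user page tables of @mm and return the PTE for @addr if the
 * mapping is present, or 0 otherwise.
 */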
pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset_map(pmd, addr);
                                        pte = *ptep;
                                        if (pte_present(pte))
                                                return pte;
                                }
                        }
                }
        }
        return 0;
}
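
/*
 * va_readable()/va_writable() report whether @addr is mapped with the
 * corresponding permission in the context of the trapping registers:
 * user-mode accesses consult the user page tables, while kernel-mode
 * accesses only need the kernel mapping to be present (and, for writes,
 * kernel-writable).
 */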
int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /* superuser mode is always readable, so we only need to
                 * check that it is present */
                return !!va_kernel_present(addr);
        }
        return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

/*
 * All
 */
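
/*
 * Invalidate the whole L1 instruction cache by set/way.  The total
 * size is line_size * ways * sets; the loop is unrolled four times and
 * walks the index range backwards until it reaches zero.
 */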
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end =
            line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
        __nds32__isb();
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}
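
/*
 * Write back dirty data from every cache level (L1 and, when present,
 * the external L2 cache controller).  Only built for CONFIG_CACHE_L2.
 */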
#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;
        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: Ensure sections 2 & 3 execute only after the
         * all-level write-back has been issued */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: Confirm the all-level write-back is done in CPU and L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: Write back the whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif
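
/*
 * Write back the whole L1 data cache; the dummy CCTL index read is
 * used to wait for the preceding write-back to complete.
 */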
void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}
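
/*
 * Write back (in write-back mode) and invalidate the whole L1 data
 * cache.  Interrupts are disabled so that no line can be dirtied
 * between the write-back and the invalidate.
 */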
void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;
        local_irq_save(flags);
        cpu_dcache_wb_all();
#endif
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}

/*
 * Page
 */
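
/*
 * Per-page primitives: each walks one PAGE_SIZE region from the end
 * towards the start, four cache lines per loop iteration, using
 * VA-based CCTL operations.
 */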
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}
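
/*
 * Write back and invalidate one page of the data cache; when @flushi
 * is set, also invalidate the corresponding instruction-cache lines
 * (needed when the page may contain code).
 */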
void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}

/*
 * Range
 */
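
/*
 * Range primitives: walk [start, end) one cache line at a time.  The
 * callers below align the boundaries to whole cache lines first.
 */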
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}
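
/*
 * As cpu_cache_wbinval_range(), but for a user VMA where parts of the
 * range may be unmapped.  Ranges larger than eight pages are handled
 * by flushing the whole cache; otherwise the partial first and last
 * pages and each whole page in between are flushed individually,
 * skipping pages that are not present.
 */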
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}
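
/*
 * Issue the line-by-line CCTL command @op to the L2 cache controller
 * for the physical range backing [start, end), then synchronize with
 * CCTL_CMD_L2_SYNC.  Compiles to a no-op when no L2 cache is
 * configured.
 */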
#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;
                /* TODO: use PAGE mode to optimize if the range is larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & (~(line_size - 1));
                p_end = (p_end + line_size - 1) & (~(line_size - 1));
                cmd = p_start | op | CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif

/*
 * DMA
 */
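
/*
 * Write back [start, end) from L1 and L2 before a device reads the
 * buffer (typically the DMA_TO_DEVICE case).  Boundaries are rounded
 * out to whole cache lines; the final msync makes the data visible to
 * the device.
 */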
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}
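
/*
 * Invalidate [start, end) in L1 and L2 before a device writes the
 * buffer.  If either boundary is not line-aligned, the partial first
 * or last line is written back and invalidated instead of being
 * discarded, so that unrelated data sharing those lines is not lost.
 */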
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;
        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}
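
/*
 * Jump to @reset with caches disabled: clear the I- and D-cache enable
 * bits, write back and invalidate the D-cache, invalidate the I-cache,
 * and branch with address translation off (jr.toff).
 */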
void cpu_reset(unsigned long reset)
{
        u32 tmp;
        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}
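
/*
 * Switch the MMU context: install the new context ID in TLB_MISC and
 * point the L1 physical page-table base at the new mm's pgd.
 */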
void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;
        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}