mm: don't include asm/pgtable.h if linux/mm.h is already included
arch/nds32/mm/proc.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

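/*
 * va_kernel_present()/va_present() walk the kernel/user page tables for
 * addr and return the raw pte value when the entry is present, or 0
 * otherwise (pte_t is a plain integer on nds32, so the value doubles as
 * a boolean).  Neither helper takes the page-table lock.
 */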
int va_kernel_present(unsigned long addr)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        p4d = p4d_offset(pgd_offset_k(addr), addr);
        pud = pud_offset(p4d, addr);
        pmd = pmd_offset(pud, addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset_map(pmd, addr);
                                        pte = *ptep;
                                        if (pte_present(pte))
                                                return pte;
                                }
                        }
                }
        }
        return 0;
}

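/*
 * va_readable()/va_writable() report whether the address can be read or
 * written from the context in regs: user mode consults the pte
 * permission bits, while kernel mode only needs a presence check since
 * kernel mappings are always readable.
 */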
int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /* superuser mode is always readable, so we only
                 * need to check whether it is present */
                return !!va_kernel_present(addr);
        }
        return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

/*
 * All
 */
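/*
 * Invalidate the entire L1 instruction cache by set/way index: walk
 * ways * sets cache lines downward from the top, issuing four
 * L1I_IX_INVAL cache-control operations per iteration (a manual 4x
 * unroll), then isb so the updated instruction stream is visible.
 */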
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end = line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
        __nds32__isb();
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;
        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: ensure the code in sections 2 & 3 executes only
         * after the all-level write-back above has been issued */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: confirm the all-level write-back has completed in
         * both the CPU and the L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: write back the whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;
        local_irq_save(flags);
#endif
        cpu_dcache_wb_all();
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}

/*
 * Page
 */
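/*
 * The page-granularity loops below walk backward from start + PAGE_SIZE
 * to start, four cache lines per iteration; this assumes PAGE_SIZE is a
 * multiple of 4 * line_size.
 */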
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}

/*
 * Range
 */
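/*
 * The range loops operate on virtual addresses and expect start/end to
 * be cache-line aligned; cpu_cache_wbinval_range() below performs the
 * alignment for callers that cannot guarantee it.
 */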
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}

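/*
 * cpu_cache_wbinval_range_check() handles user ranges that may be only
 * partially mapped: ranges larger than eight pages fall back to
 * whole-cache operations, otherwise each page is checked with
 * va_present() and unmapped pages are skipped.
 */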
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}

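/*
 * cpu_l2cache_op() drives the external L2 cache controller: it issues
 * one single-line CCTL command per L2 line over the physical address
 * range, then a final L2_SYNC to wait for completion.  Without an L2C
 * (atl2c_base unset, or CONFIG_CACHE_L2 disabled) it is a no-op.
 */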
#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;
                /* TODO: use PAGE mode to optimize when the range is larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & ~(line_size - 1);
                p_end = (p_end + line_size - 1) & ~(line_size - 1);
                cmd = (p_start & ~(line_size - 1)) | op | CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif

/*
 * DMA
 */
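/*
 * The cpu_dma_* helpers keep memory coherent with external bus masters:
 * each aligns the range to L1 D-cache lines, operates on L1 and (when
 * configured) L2 via cpu_l2cache_op(), and finishes with
 * __nds32__msync_all() under disabled interrupts.
 */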
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}

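/*
 * When the requested range is not line-aligned, the partial head and
 * tail lines are written back and invalidated first, so that unrelated
 * data sharing those cache lines is not lost when the aligned range is
 * invalidated.
 */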
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        if (unlikely(start == end))
                return;
        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}

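/*
 * cpu_reset() masks interrupts, disables and flushes both L1 caches,
 * and then jumps to the given reset address via jr.toff.
 */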
void cpu_reset(unsigned long reset)
{
        u32 tmp;
        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

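/*
 * cpu_switch_mm() installs a new address space: the context ID of mm is
 * written into TLB_MISC and L1_PPTB is pointed at the physical address
 * of the new page-global directory.
 */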
void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;
        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}