// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

static struct cpuinfo_arc_cache {
	unsigned int sz_k, line_len, colors;
} ic_info, dc_info, slc_info;

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
	struct cpuinfo_arc_cache *p_slc = &slc_info;
	struct bcr_identity ident;
	struct bcr_generic sbcr;
	struct bcr_clust_cfg cbcr;
	struct bcr_volatile vol;
	int n = 0;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		struct bcr_slc_cfg slc_cfg;
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
	}
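
	/*
	 * Illustrative decode (assumed values): slc_cfg.sz = 2 reports a
	 * 512K SLC (128K << 2) and slc_cfg.lsz = 0 reports 128B lines,
	 * per the decode just above.
	 */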

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;

		/*
		 * As of today we don't support both IOC and ZONE_HIGHMEM enabled
		 * simultaneously, because the IOC aperture covers only
		 * ZONE_NORMAL (low mem) and any dma transactions outside this
		 * region won't be HW coherent.
		 * If we want to use both IOC and ZONE_HIGHMEM we can use
		 * bounce_buffer to handle dma transactions to HIGHMEM.
		 * Also it is possible to modify dma_direct cache ops or increase
		 * IOC aperture size if we are planning to use HIGHMEM without PAE.
		 */
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	READ_BCR(AUX_IDENTITY, ident);

	/* HS 2.0 didn't have AUX_VOL */
	if (ident.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (ident.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
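
	/*
	 * Illustrative decode: vol.start = 0xF gives perip_base = 0xF000_0000;
	 * on HS 3.0+, vol.limit = 0x10 gives perip_end = (0x10 << 28) - 1 =
	 * 0xFFFF_FFFF, i.e. the same values as the legacy defaults above.
	 */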

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return n;
}

int arc_cache_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
	struct bcr_cache ibcr, dbcr;
	int vipt, assoc;
	int n = 0;

	READ_BCR(ARC_REG_IC_BCR, ibcr);
	if (!ibcr.ver)
		goto dc_chk;

	if (is_isa_arcompact() && (ibcr.ver <= 3)) {
		BUG_ON(ibcr.config != 3);
		assoc = 2;		/* Fixed to 2w set assoc */
	} else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
		assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
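
	/*
	 * Illustrative: a 16K, 2-way VIPT I-cache with 4K pages gives
	 * colors = 16/2/4 = 2, so U/K mappings of a page may land in
	 * different cache colors and the " aliasing" tag is printed below.
	 */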

	n += scnprintf(buf + n, len - n,
		       "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
		       p_ic->sz_k, assoc, p_ic->line_len,
		       p_ic->colors > 1 ? " aliasing" : "",
		       IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));

dc_chk:
	READ_BCR(ARC_REG_DC_BCR, dbcr);
	if (!dbcr.ver)
		goto slc_chk;

	/* decode geometry first: @colors below depends on @sz_k being set */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

	if (is_isa_arcompact() && (dbcr.ver <= 3)) {
		BUG_ON(dbcr.config != 2);
		vipt = 1;
		assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
	} else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
		vipt = 0;
		assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->colors = 1;		/* PIPT so can't VIPT alias */
	}

	n += scnprintf(buf + n, len - n,
		       "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
		       p_dc->sz_k, assoc, p_dc->line_len,
		       vipt ? "VIPT" : "PIPT",
		       p_dc->colors > 1 ? " aliasing" : "",
		       IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));

slc_chk:
	if (is_isa_arcv2())
		n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);

	return n;
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
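
/*
 * Note the encoding: OP_FLUSH_N_INV == (OP_INV | OP_FLUSH), so checks such
 * as "op & OP_INV" in the helpers below match both plain invalidate and
 * flush-n-inv requests.
 */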

/*
 * Cache Flush programming model
 *
 * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
 * Programming model requires both paddr and vaddr, irrespective of aliasing:
 *  - vaddr in {I,D}C_IV?L
 *  - paddr in {I,D}C_PTAG
 *
 * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
 * Programming model is different for aliasing vs. non-aliasing I$
 *  - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
 *  - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
 *
 * - If PAE40 is enabled, independent of aliasing considerations, the higher
 *   bits need to be written into PTAG_HI
 */
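
/*
 * Illustrative MMUv3 sequence (assuming 64B lines): to flush the D$ line
 * holding paddr 0x8000_1040 mapped at vaddr 0x5000_1040, write 0x8000_1040
 * to ARC_REG_DC_PTAG and then 0x5000_1040 to ARC_REG_DC_FLDL; this is, in
 * essence, what each iteration of __cache_line_loop_v3() below does.
 */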

static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line, and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
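
	/*
	 * Worked example (illustrative, 64B lines): a 0x48 byte request at
	 * paddr 0x8000_1058 becomes sz = 0x48 + (0x58 & 0x3F) = 0x60 and
	 * paddr = 0x8000_1040, so num_lines = DIV_ROUND_UP(0x60, 64) = 2,
	 * exactly the two lines the unaligned request straddles.
	 */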

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH

static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line, and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * optimized flush operation which takes a region as opposed to iterating per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 * account for any trailing gap to end of cache line;
		 * this is equivalent to DIV_ROUND_UP() in the line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}
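
/*
 * Worked example (illustrative, 64B lines): the same 0x48 byte request at
 * paddr 0x8000_1058 is widened to START = 0x8000_1040 and
 * END = 0x8000_1040 + 0x48 + 0x18 + 0x3F = 0x8000_10DF, so the exclusive
 * END still covers the trailing partial line, mirroring DIV_ROUND_UP()
 * in the per-line loops.
 */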

#endif

#ifdef CONFIG_ARC_MMU_V3
#define __cache_line_loop	__cache_line_loop_v3
#else
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}
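
/*
 * Note: @full_page above is a compile-time constant per (inlined) call site:
 * e.g. __flush_dcache_page() below passes the PAGE_SIZE constant, folding it
 * to 1 so the line loops skip their alignment fixups entirely.
 */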

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);

	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
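
/*
 * Worked example (illustrative, 128B SLC lines): for paddr = 0x8000_0040 and
 * sz = 0x100, END is programmed to 0x8000_0040 + 0x100 + 0x7F = 0x8000_01BF,
 * deliberately past the last byte so that END != START even for a region
 * smaller than one line; hardware ignores the low (intra-line) bits anyway.
 */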

static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
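
/*
 * Congruency example (illustrative): with a 64K, 4-way VIPT D-cache and 4K
 * pages there are 64/4/4 = 4 colors, so a K-mapping and a U-mapping of the
 * same page are congruent only if their vaddrs agree in bits [13:12];
 * otherwise the flush above is needed before the other view reads the page.
 */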

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
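
/*
 * Illustrative straddle case: kstart = 0x7000_0f80, tot_sz = 0x100 takes two
 * loop iterations: the first syncs 0x80 bytes up to the page boundary, the
 * second syncs the remaining 0x80 bytes after a fresh vmalloc_to_pfn()
 * lookup, since virtually contiguous vmalloc pages need not be physically
 * contiguous.
 */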

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
EXPORT_SYMBOL(clear_user_page);

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
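
/*
 * Illustrative userspace usage (hypothetical JIT buffer), invoking the
 * syscall by number since a libc wrapper may be absent:
 *
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, code_sz, 0);
 *
 * issued after writing generated code to code_buf and before jumping to it.
 */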

/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so only once by Master core
 *    Non-Masters need not be accessing caches at that time
 *    - They are either HALT_ON_RESET and kick started much later or
 *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
 *      doesn't perturb caches or coherency unit
 *
 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling
 *
 * 3. All caches need to be disabled when setting up IOC to elide any in-flight
 *    coherency transactions
 */
static noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (due to bootloader) it technically needs to
	 * be reconfigured with aperture base,size corresponding to Linux memory map
	 * which will certainly be different than uboot's. But disabling and
	 * reenabling IOC when DMA might be potentially active is tricky business.
	 * To avoid random memory issues later, just panic here and ask user to
	 * upgrade bootloader to one which doesn't enable IOC
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * currently IOC Aperture covers entire DDR
	 * TBD: fix for PGU + 1GB of low mem
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
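
	/*
	 * e.g. mem_sz = 512MB: mem_sz >> 10 = 2^19 KB units,
	 * order_base_2(2^19) = 19, and 19 - 2 = 17 = 0x11, matching the
	 * "0x11 implies 512MB" encoding described above (illustrative).
	 */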

	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
}

/*
 * Cache related boot time checks/setups only needed on master CPU:
 *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *    Assume SMP only, so all cores will have same cache config. A check on
 *    one core suffices for all
 *  - IOC setup / dma callbacks only need to be done once
 */
static noinline void __init arc_cache_init_master(void)
{
	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &ic_info;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->colors > 1)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &dc_info;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->colors > 1) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != dc->colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (handled && dc->colors == 1) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be set
	 * but end up not being relevant as the first function in chain is not
	 * called at all for devices using coherent DMA.
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses
	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
	 * to be zeroed to keep the ops sane.
	 * As an optimization for more common !PAE enabled case, zero them out
	 * once at init, rather than checking/setting to 0 for every runtime op
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}