/*
 * Copyright (C) 2012-2014 Panasonic Corporation
 * Copyright (C) 2015-2016 Socionext Inc.
 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
11 #include <linux/kernel.h>
12 #include <asm/armv7.h>
13 #include <asm/processor.h>
/*
 * True when the queued operation @op is a range operation, i.e. when the
 * address/size registers (SSCOQAD/SSCOQSZ) must be programmed as well.
 * Fix: parenthesize the macro argument so that an expression argument
 * such as (a | b) cannot mis-associate with the & operator.
 */
#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		(((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
20 #ifdef CONFIG_UNIPHIER_L2CACHE_ON
22 /* uniphier_cache_sync - perform a sync point for a particular cache level */
23 static void uniphier_cache_sync(void)
25 /* drain internal buffers */
26 writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
27 /* need a read back to confirm */
28 readl(UNIPHIER_SSCOPE);
32 * uniphier_cache_maint_common - run a queue operation
34 * @start: start address of range operation (don't care for "all" operation)
35 * @size: data size of range operation (don't care for "all" operation)
36 * @operation: flags to specify the desired cache operation
38 static void uniphier_cache_maint_common(u32 start, u32 size, u32 operation)
40 /* clear the complete notification flag */
41 writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);
44 /* set cache operation */
45 writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);
47 /* set address range if needed */
48 if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
49 writel(start, UNIPHIER_SSCOQAD);
50 writel(size, UNIPHIER_SSCOQSZ);
52 } while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
53 (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
55 /* wait until the operation is completed */
56 while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
60 static void uniphier_cache_maint_all(u32 operation)
62 uniphier_cache_maint_common(0, 0, UNIPHIER_SSCOQM_S_ALL | operation);
64 uniphier_cache_sync();
67 static void uniphier_cache_maint_range(u32 start, u32 end, u32 operation)
72 * If the start address is not aligned,
73 * perform a cache operation for the first cache-line
75 start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);
79 if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
80 /* this means cache operation for all range */
81 uniphier_cache_maint_all(operation);
86 * If the end address is not aligned,
87 * perform a cache operation for the last cache-line
89 size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);
92 u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);
94 uniphier_cache_maint_common(start, chunk_size,
95 UNIPHIER_SSCOQM_S_RANGE | operation);
101 uniphier_cache_sync();
104 void v7_outer_cache_flush_all(void)
106 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
109 void v7_outer_cache_inval_all(void)
111 uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
114 void v7_outer_cache_flush_range(u32 start, u32 end)
116 uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
119 void v7_outer_cache_inval_range(u32 start, u32 end)
121 if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
122 start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
123 uniphier_cache_maint_range(start, UNIPHIER_SSC_LINE_SIZE,
124 UNIPHIER_SSCOQM_CM_FLUSH);
125 start += UNIPHIER_SSC_LINE_SIZE;
129 uniphier_cache_sync();
133 if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
134 end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
135 uniphier_cache_maint_range(end, UNIPHIER_SSC_LINE_SIZE,
136 UNIPHIER_SSCOQM_CM_FLUSH);
140 uniphier_cache_sync();
144 uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
147 void v7_outer_cache_enable(void)
151 writel(U32_MAX, UNIPHIER_SSCLPDAWCR); /* activate all ways */
152 tmp = readl(UNIPHIER_SSCC);
153 tmp |= UNIPHIER_SSCC_ON;
154 writel(tmp, UNIPHIER_SSCC);
158 void v7_outer_cache_disable(void)
162 tmp = readl(UNIPHIER_SSCC);
163 tmp &= ~UNIPHIER_SSCC_ON;
164 writel(tmp, UNIPHIER_SSCC);
167 void enable_caches(void)