1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
 * Copyright (C) 2017 SiFive
 */
8 #include <linux/syscalls.h>
9 #include <asm/cacheflush.h>
10 #include <asm/hwprobe.h>
12 #include <asm/switch_to.h>
13 #include <asm/uaccess.h>
14 #include <asm/unistd.h>
15 #include <asm-generic/mman-common.h>
17 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
18 unsigned long prot, unsigned long flags,
19 unsigned long fd, off_t offset,
20 unsigned long page_shift_offset)
22 if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
25 return ksys_mmap_pgoff(addr, len, prot, flags, fd,
26 offset >> (PAGE_SHIFT - page_shift_offset));
30 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
31 unsigned long, prot, unsigned long, flags,
32 unsigned long, fd, off_t, offset)
34 return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset,
			      PAGE_SHIFT - 12);
}
#endif
52 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
53 * having a direct 'fence.i' instruction available to userspace (which we
54 * can't trap!), that's not actually viable when running on Linux because the
55 * kernel might schedule a process on another hart. There is no way for
56 * userspace to handle this without invoking the kernel (as it doesn't know the
57 * thread->hart mappings), so we've defined a RISC-V specific system call to
58 * flush the instruction cache.
60 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
61 * address range, with the flush applying to either all threads or just the
62 * caller. We don't currently do anything with the address range, that's just
63 * in there for forwards compatibility.
65 SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
68 /* Check the reserved flags. */
69 if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
72 flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
78 * The hwprobe interface, for allowing userspace to probe to see which features
79 * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more
82 static void hwprobe_arch_id(struct riscv_hwprobe *pair,
83 const struct cpumask *cpus)
89 for_each_cpu(cpu, cpus) {
93 case RISCV_HWPROBE_KEY_MVENDORID:
94 cpu_id = riscv_cached_mvendorid(cpu);
96 case RISCV_HWPROBE_KEY_MIMPID:
97 cpu_id = riscv_cached_mimpid(cpu);
99 case RISCV_HWPROBE_KEY_MARCHID:
100 cpu_id = riscv_cached_marchid(cpu);
108 * If there's a mismatch for the given set, return -1 in the
120 static void hwprobe_one_pair(struct riscv_hwprobe *pair,
121 const struct cpumask *cpus)
124 case RISCV_HWPROBE_KEY_MVENDORID:
125 case RISCV_HWPROBE_KEY_MARCHID:
126 case RISCV_HWPROBE_KEY_MIMPID:
127 hwprobe_arch_id(pair, cpus);
130 * The kernel already assumes that the base single-letter ISA
131 * extensions are supported on all harts, and only supports the
132 * IMA base, so just cheat a bit here and tell that to
135 case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
136 pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
139 case RISCV_HWPROBE_KEY_IMA_EXT_0:
142 pair->value |= RISCV_HWPROBE_IMA_FD;
144 if (riscv_isa_extension_available(NULL, c))
145 pair->value |= RISCV_HWPROBE_IMA_C;
150 * For forward compatibility, unknown keys don't fail the whole
151 * call, but get their element key set to -1 and value set to 0
152 * indicating they're unrecognized.
161 static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
162 size_t pair_count, size_t cpu_count,
163 unsigned long __user *cpus_user,
170 /* Check the reserved flags. */
175 * The interface supports taking in a CPU mask, and returns values that
176 * are consistent across that mask. Allow userspace to specify NULL and
177 * 0 as a shortcut to all online CPUs.
179 cpumask_clear(&cpus);
180 if (!cpu_count && !cpus_user) {
181 cpumask_copy(&cpus, cpu_online_mask);
183 if (cpu_count > cpumask_size())
184 cpu_count = cpumask_size();
186 ret = copy_from_user(&cpus, cpus_user, cpu_count);
191 * Userspace must provide at least one online CPU, without that
192 * there's no way to define what is supported.
194 cpumask_and(&cpus, &cpus, cpu_online_mask);
195 if (cpumask_empty(&cpus))
199 for (out = 0; out < pair_count; out++, pairs++) {
200 struct riscv_hwprobe pair;
202 if (get_user(pair.key, &pairs->key))
206 hwprobe_one_pair(&pair, &cpus);
207 ret = put_user(pair.key, &pairs->key);
209 ret = put_user(pair.value, &pairs->value);
218 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
219 size_t, pair_count, size_t, cpu_count, unsigned long __user *,
220 cpus, unsigned int, flags)
222 return do_riscv_hwprobe(pairs, pair_count, cpu_count,