/*
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at most 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *
 *  Note: the concept clashes with user mode linux. UML users should
 *  use the vDSO.
 */

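/*
 * Concretely: the vsyscall page starts at 0xffffffffff600000 (-10 MB), with
 * vgettimeofday at offset 0x0, vtime at 0x400 and vgetcpu at 0x800.
 */
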
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/topology.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

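/*
 * These variables live in the vvar page, which is mapped read-only into
 * every process, so the vDSO and the emulated vsyscalls can read the
 * current clocksource mode and gettimeofday data without a syscall.
 */
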
DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);

static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;

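/*
 * "vsyscall=" boot parameter: "emulate" (the default) traps into the kernel
 * and emulates the legacy vsyscalls, "native" maps the real executable
 * vsyscall page, and "none" refuses to emulate, so any use of the legacy
 * vsyscalls faults.
 */
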
static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("native", str))
			vsyscall_mode = NATIVE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);

void update_vsyscall_tz(void)
{
	vsyscall_gtod_data.sys_tz = sys_tz;
}

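/*
 * Called from the timekeeping core whenever the clocksource or the time is
 * updated: publish a consistent snapshot to the vvar page under the
 * seqcount so that userspace readers never see a half-updated set of values.
 */
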
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	struct timespec monotonic;

	write_seqcount_begin(&vsyscall_gtod_data.seq);

	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = mult;
	vsyscall_gtod_data.clock.shift = clock->shift;

	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;

	monotonic = timespec_add(*wall_time, *wtm);
	vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
	vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;

	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
	vsyscall_gtod_data.monotonic_time_coarse =
		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);

	write_seqcount_end(&vsyscall_gtod_data.seq);
}

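/* Ratelimited diagnostic for suspicious or misbehaving vsyscall users. */
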
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk;

	if (!show_unhandled_signals || !__ratelimit(&rs))
		return;

	tsk = current;

	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
	       level, tsk->comm, task_pid_nr(tsk),
	       message, regs->ip, regs->cs,
	       regs->sp, regs->ax, regs->si, regs->di);
}

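/*
 * The three legacy entry points sit 1024 bytes apart in the vsyscall page,
 * so bits 10-11 of a correctly aligned address select the vsyscall number.
 */
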
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_START)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_error, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		siginfo_t info;
		struct thread_struct *thread = &current->thread;

		thread->error_code = 6;	/* user fault, no page, write */
		thread->cr2 = ptr;
		thread->trap_no = 14;

		memset(&info, 0, sizeof(info));
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *)ptr;

		force_sig_info(SIGSEGV, &info, current);
		return false;
	} else {
		return true;
	}
}

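/*
 * Called from the page fault handler when userspace executes an address in
 * the vsyscall page while emulation is enabled: figure out which legacy
 * entry point was targeted, do the work on the caller's behalf, and then
 * emulate the return back to the saved return address.
 */
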
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr;
	int prev_sig_on_uaccess_error;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;
	if (seccomp_mode(&tsk->seccomp))
		do_exit(SIGKILL);

	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
	current_thread_info()->sig_on_uaccess_error = 1;

	/*
	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, 0 means "don't write anything" not "write it at
	 * address 0".
	 */
	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
			break;

		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t)))
			break;

		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned)))
			break;

		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 NULL);
		break;
	}

	current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;

	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true;	/* Don't emulate the ret. */
	}

	regs->ax = ret;

	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;

	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}

/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

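	/*
	 * With RDTSCP, userspace vgetcpu() can read (node << 12) | cpu straight
	 * from the TSC_AUX MSR.  The GDT limit set up below serves CPUs without
	 * RDTSCP, where vgetcpu() falls back to reading the per-cpu segment
	 * limit with LSL.
	 */
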
	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

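/* Per-cpu helper, run via on_each_cpu()/smp_call_function_single(). */
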
static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

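/*
 * CPU hotplug callback: make sure a freshly onlined CPU gets its GDT entry
 * (and TSC_AUX) programmed before it runs user code.
 */
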
static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

	return NOTIFY_DONE;
}

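/*
 * Establish the fixmap mappings: the vsyscall page is executable only in
 * native mode (otherwise it is mapped with the vvar protections, so
 * executing it faults into emulate_vsyscall()), and the vvar page is
 * always mapped read-only.
 */
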
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
	extern char __vvar_page;
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
		     vsyscall_mode == NATIVE
		     ? PAGE_KERNEL_VSYSCALL
		     : PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
		     (unsigned long)VSYSCALL_START);

	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}

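/*
 * Late init: program the per-cpu GDT entries on every CPU that is already
 * online and register the hotplug notifier for CPUs that come up later.
 */
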
static int __init vsyscall_init(void)
{
	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	hotcpu_notifier(cpu_vsyscall_notifier, 30);

	return 0;
}
__initcall(vsyscall_init);