/* arm64: uaccess: Formalise types for access_ok() */
/* arch/arm64/kernel/armv8_deprecated.c */
1 /*
2  *  Copyright (C) 2014 ARM Limited
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  */
8
9 #include <linux/cpu.h>
10 #include <linux/init.h>
11 #include <linux/list.h>
12 #include <linux/perf_event.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/sysctl.h>
16
17 #include <asm/cpufeature.h>
18 #include <asm/insn.h>
19 #include <asm/sysreg.h>
20 #include <asm/system_misc.h>
21 #include <asm/traps.h>
22 #include <asm/kprobes.h>
23 #include <linux/uaccess.h>
24 #include <asm/cpufeature.h>
25
26 #define CREATE_TRACE_POINTS
27 #include "trace-events-emulation.h"
28
29 /*
30  * The runtime support for deprecated instruction support can be in one of
31  * following three states -
32  *
33  * 0 = undef
34  * 1 = emulate (software emulation)
35  * 2 = hw (supported in hardware)
36  */
/* Runtime handling mode for one deprecated/obsolete instruction. */
enum insn_emulation_mode {
	INSN_UNDEF,	/* 0: instruction is left undefined */
	INSN_EMULATE,	/* 1: trapped and emulated in software */
	INSN_HW,	/* 2: supported natively in hardware */
};

/* Deprecation stage; selects the default and maximum mode at registration. */
enum legacy_insn_status {
	INSN_DEPRECATED,	/* defaults to INSN_EMULATE, max INSN_HW */
	INSN_OBSOLETE,		/* defaults to INSN_UNDEF, max INSN_EMULATE */
};
47
/* Callbacks and undef hooks describing one emulatable instruction. */
struct insn_emulation_ops {
	const char              *name;	/* sysctl entry / log name, e.g. "swp" */
	enum legacy_insn_status status;	/* deprecated vs. obsolete */
	struct undef_hook       *hooks;	/* array terminated by instr_mask == 0 */
	int                     (*set_hw_mode)(bool enable);	/* toggle native HW support; NULL if none */
};

/* One registered emulation, linked on the global insn_emulation list. */
struct insn_emulation {
	struct list_head node;		/* entry on the insn_emulation list */
	struct insn_emulation_ops *ops;
	int current_mode;		/* enum insn_emulation_mode value */
	int min;			/* sysctl lower bound (INSN_UNDEF) */
	int max;			/* sysctl upper bound, set per status */
};
62
63 static LIST_HEAD(insn_emulation);
64 static int nr_insn_emulated __initdata;
65 static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
66
67 static void register_emulation_hooks(struct insn_emulation_ops *ops)
68 {
69         struct undef_hook *hook;
70
71         BUG_ON(!ops->hooks);
72
73         for (hook = ops->hooks; hook->instr_mask; hook++)
74                 register_undef_hook(hook);
75
76         pr_notice("Registered %s emulation handler\n", ops->name);
77 }
78
79 static void remove_emulation_hooks(struct insn_emulation_ops *ops)
80 {
81         struct undef_hook *hook;
82
83         BUG_ON(!ops->hooks);
84
85         for (hook = ops->hooks; hook->instr_mask; hook++)
86                 unregister_undef_hook(hook);
87
88         pr_notice("Removed %s emulation handler\n", ops->name);
89 }
90
91 static void enable_insn_hw_mode(void *data)
92 {
93         struct insn_emulation *insn = (struct insn_emulation *)data;
94         if (insn->ops->set_hw_mode)
95                 insn->ops->set_hw_mode(true);
96 }
97
98 static void disable_insn_hw_mode(void *data)
99 {
100         struct insn_emulation *insn = (struct insn_emulation *)data;
101         if (insn->ops->set_hw_mode)
102                 insn->ops->set_hw_mode(false);
103 }
104
105 /* Run set_hw_mode(mode) on all active CPUs */
106 static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
107 {
108         if (!insn->ops->set_hw_mode)
109                 return -EINVAL;
110         if (enable)
111                 on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
112         else
113                 on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
114         return 0;
115 }
116
117 /*
118  * Run set_hw_mode for all insns on a starting CPU.
119  * Returns:
120  *  0           - If all the hooks ran successfully.
121  * -EINVAL      - At least one hook is not supported by the CPU.
122  */
123 static int run_all_insn_set_hw_mode(unsigned int cpu)
124 {
125         int rc = 0;
126         unsigned long flags;
127         struct insn_emulation *insn;
128
129         raw_spin_lock_irqsave(&insn_emulation_lock, flags);
130         list_for_each_entry(insn, &insn_emulation, node) {
131                 bool enable = (insn->current_mode == INSN_HW);
132                 if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
133                         pr_warn("CPU[%u] cannot support the emulation of %s",
134                                 cpu, insn->ops->name);
135                         rc = -EINVAL;
136                 }
137         }
138         raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
139         return rc;
140 }
141
/*
 * Transition @insn from mode @prev to insn->current_mode: first tear down
 * whatever @prev installed (emulation hooks or the HW enable bit), then
 * install support for the new mode.
 *
 * Returns 0 on success, or the error from enabling HW mode on all CPUs;
 * on failure the caller is expected to revert current_mode (see
 * emulation_proc_handler()).
 */
static int update_insn_emulation_mode(struct insn_emulation *insn,
				       enum insn_emulation_mode prev)
{
	int ret = 0;

	switch (prev) {
	case INSN_UNDEF: /* Nothing to be done */
		break;
	case INSN_EMULATE:
		remove_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		/* Only log when the insn actually had a set_hw_mode hook. */
		if (!run_all_cpu_set_hw_mode(insn, false))
			pr_notice("Disabled %s support\n", insn->ops->name);
		break;
	}

	switch (insn->current_mode) {
	case INSN_UNDEF:
		break;
	case INSN_EMULATE:
		register_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		/* May fail if HW mode is unsupported; propagate to caller. */
		ret = run_all_cpu_set_hw_mode(insn, true);
		if (!ret)
			pr_notice("Enabled %s support\n", insn->ops->name);
		break;
	}

	return ret;
}
174
175 static void __init register_insn_emulation(struct insn_emulation_ops *ops)
176 {
177         unsigned long flags;
178         struct insn_emulation *insn;
179
180         insn = kzalloc(sizeof(*insn), GFP_KERNEL);
181         insn->ops = ops;
182         insn->min = INSN_UNDEF;
183
184         switch (ops->status) {
185         case INSN_DEPRECATED:
186                 insn->current_mode = INSN_EMULATE;
187                 /* Disable the HW mode if it was turned on at early boot time */
188                 run_all_cpu_set_hw_mode(insn, false);
189                 insn->max = INSN_HW;
190                 break;
191         case INSN_OBSOLETE:
192                 insn->current_mode = INSN_UNDEF;
193                 insn->max = INSN_EMULATE;
194                 break;
195         }
196
197         raw_spin_lock_irqsave(&insn_emulation_lock, flags);
198         list_add(&insn->node, &insn_emulation);
199         nr_insn_emulated++;
200         raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
201
202         /* Register any handlers if required */
203         update_insn_emulation_mode(insn, INSN_UNDEF);
204 }
205
/*
 * sysctl handler for /proc/sys/abi/<insn>: read or update the emulation
 * mode of one instruction.
 *
 * table->data normally holds the struct insn_emulation pointer; it is
 * temporarily redirected at the int current_mode so proc_dointvec_minmax()
 * can parse and range-check the value, then restored before returning.
 *
 * NOTE(review): there is no locking here, so two concurrent writers could
 * race on insn->current_mode — confirm whether serialisation is needed.
 */
static int emulation_proc_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret = 0;
	struct insn_emulation *insn = (struct insn_emulation *) table->data;
	enum insn_emulation_mode prev_mode = insn->current_mode;

	/* Let proc_dointvec_minmax() operate directly on current_mode. */
	table->data = &insn->current_mode;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write || prev_mode == insn->current_mode)
		goto ret;

	ret = update_insn_emulation_mode(insn, prev_mode);
	if (ret) {
		/* Mode change failed, revert to previous mode. */
		insn->current_mode = prev_mode;
		update_insn_emulation_mode(insn, INSN_UNDEF);
	}
ret:
	/* Always restore table->data to the insn_emulation pointer. */
	table->data = insn;
	return ret;
}
230
231 static void __init register_insn_emulation_sysctl(void)
232 {
233         unsigned long flags;
234         int i = 0;
235         struct insn_emulation *insn;
236         struct ctl_table *insns_sysctl, *sysctl;
237
238         insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
239                               GFP_KERNEL);
240
241         raw_spin_lock_irqsave(&insn_emulation_lock, flags);
242         list_for_each_entry(insn, &insn_emulation, node) {
243                 sysctl = &insns_sysctl[i];
244
245                 sysctl->mode = 0644;
246                 sysctl->maxlen = sizeof(int);
247
248                 sysctl->procname = insn->ops->name;
249                 sysctl->data = insn;
250                 sysctl->extra1 = &insn->min;
251                 sysctl->extra2 = &insn->max;
252                 sysctl->proc_handler = emulation_proc_handler;
253                 i++;
254         }
255         raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
256
257         register_sysctl("abi", insns_sysctl);
258 }
259
260 /*
261  *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
262  *  store-exclusive.
263  *
264  *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
265  *  Where: Rt  = destination
266  *         Rt2 = source
267  *         Rn  = address
268  */
269
270 /*
271  * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
272  */
273
274 /* Arbitrary constant to ensure forward-progress of the LL/SC loop */
275 #define __SWP_LL_SC_LOOPS       4
276
/*
 * LL/SC emulation of a user-space SWP/SWPB.
 *
 * Inline-asm operand mapping:
 *   %w0 = res   (0 on success, -EAGAIN if the loop budget ran out,
 *                -EFAULT on a user-access fault via the fixup entry)
 *   %w1 = data  (in: value to store; out: value previously at [addr])
 *   %w2 = temp  (loaded old value), %w3 = temp2 (remaining loop count)
 *   %4 = addr, %5 = -EAGAIN, %6 = -EFAULT, %7 = __SWP_LL_SC_LOOPS
 *
 * The loop retries the ldxr/stxr pair up to __SWP_LL_SC_LOOPS times;
 * uaccess_enable()/uaccess_disable() bracket the user access.
 */
#define __user_swpX_asm(data, addr, res, temp, temp2, B)        \
do {                                                            \
	uaccess_enable();                                       \
	__asm__ __volatile__(                                   \
	"	mov		%w3, %w7\n"                     \
	"0:	ldxr"B"		%w2, [%4]\n"                    \
	"1:	stxr"B"		%w0, %w1, [%4]\n"               \
	"	cbz		%w0, 2f\n"                      \
	"	sub		%w3, %w3, #1\n"                 \
	"	cbnz		%w3, 0b\n"                      \
	"	mov		%w0, %w5\n"                     \
	"	b		3f\n"                           \
	"2:\n"                                                  \
	"	mov		%w1, %w2\n"                     \
	"3:\n"                                                  \
	"	.pushsection	 .fixup,\"ax\"\n"               \
	"	.align		2\n"                            \
	"4:	mov		%w0, %w6\n"                     \
	"	b		3b\n"                           \
	"	.popsection"                                    \
	_ASM_EXTABLE(0b, 4b)                                    \
	_ASM_EXTABLE(1b, 4b)                                    \
	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
	: "r" ((unsigned long)addr), "i" (-EAGAIN),             \
	  "i" (-EFAULT),                                        \
	  "i" (__SWP_LL_SC_LOOPS)                               \
	: "memory");                                            \
	uaccess_disable();                                      \
} while (0)

/* Word (SWP) and byte (SWPB) instantiations of the LL/SC sequence. */
#define __user_swp_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "")
#define __user_swpb_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "b")
311
312 /*
313  * Bit 22 of the instruction encoding distinguishes between
314  * the SWP and SWPB variants (bit set means SWPB).
315  */
316 #define TYPE_SWPB (1 << 22)
317
/*
 * Atomically swap @*data with the user word/byte at @address.
 *
 * @address: 32-bit user virtual address (must be 4-byte aligned for SWP)
 * @data:    in/out; on success holds the old memory contents
 * @type:    TYPE_SWPB for the byte variant, 0 for the word variant
 *
 * Returns 0 on success, -EFAULT on misalignment or user-access fault,
 * or -EAGAIN if the LL/SC loop kept failing while a signal was pending.
 */
static int emulate_swpX(unsigned int address, unsigned int *data,
			unsigned int type)
{
	unsigned int res = 0;

	if ((type != TYPE_SWPB) && (address & 0x3)) {
		/* SWP to unaligned address not permitted */
		pr_debug("SWP instruction on unaligned pointer!\n");
		return -EFAULT;
	}

	while (1) {
		unsigned long temp, temp2;

		if (type == TYPE_SWPB)
			__user_swpb_asm(*data, address, res, temp, temp2);
		else
			__user_swp_asm(*data, address, res, temp, temp2);

		/* Retry on LL/SC contention unless a signal is pending. */
		if (likely(res != -EAGAIN) || signal_pending(current))
			break;

		cond_resched();
	}

	return res;
}
345
/* Return values for aarch32_check_condition(). */
#define ARM_OPCODE_CONDTEST_FAIL   0
#define ARM_OPCODE_CONDTEST_PASS   1
#define ARM_OPCODE_CONDTEST_UNCOND 2

/* Condition-field value (opcode bits [31:28]) marking unconditional encodings. */
#define ARM_OPCODE_CONDITION_UNCOND     0xf

/*
 * Evaluate the condition field of 32-bit AArch32 @opcode against the
 * saved flags in @psr, via the per-condition check table.
 */
static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
{
	u32 cc_bits  = opcode >> 28;

	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
		if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
			return ARM_OPCODE_CONDTEST_PASS;
		else
			return ARM_OPCODE_CONDTEST_FAIL;
	}
	return ARM_OPCODE_CONDTEST_UNCOND;
}
364
/*
 * swp_handler logs the id of the calling process, dissects the instruction,
 * sanity-checks the memory location, calls emulate_swpX for the actual
 * operation and deals with fixup/error handling before returning.
 *
 * Returns 0 when the trap was consumed (instruction emulated, condition
 * failed, or a segfault was delivered); negative to let the caller treat
 * the instruction as undefined.
 */
static int swp_handler(struct pt_regs *regs, u32 instr)
{
	u32 destreg, data, type, address = 0;
	const void __user *user_ptr;
	int rn, rt2, res = 0;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	/* Bit 22 selects SWPB over SWP (see TYPE_SWPB). */
	type = instr & TYPE_SWPB;

	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a SWP, undef */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* Decode the address (Rn), source (Rt2) and destination (Rt) regs. */
	rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
	rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);

	address = (u32)regs->user_regs.regs[rn];
	data    = (u32)regs->user_regs.regs[rt2];
	destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);

	/* NOTE(review): format string has a stray trailing ')'. */
	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
		rn, address, destreg,
		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);

	/* Check access in reasonable access range for both SWP and SWPB */
	user_ptr = (const void __user *)(unsigned long)(address & ~3);
	if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
			address);
		goto fault;
	}

	res = emulate_swpX(address, &data, type);
	if (res == -EFAULT)
		goto fault;
	else if (res == 0)
		/* Success: old memory contents land in the destination reg. */
		regs->user_regs.regs[destreg] = data;

ret:
	if (type == TYPE_SWPB)
		trace_instruction_emulation("swpb", regs->pc);
	else
		trace_instruction_emulation("swp", regs->pc);

	pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	arm64_skip_faulting_instruction(regs, 4);
	return 0;

fault:
	pr_debug("SWP{B} emulation: access caused memory abort!\n");
	arm64_notify_segfault(regs, address);

	return 0;
}
436
437 /*
438  * Only emulate SWP/SWPB executed in ARM state/User mode.
439  * The kernel must be SWP free and SWP{B} does not exist in Thumb.
440  */
/*
 * Only emulate SWP/SWPB executed in ARM state/User mode.
 * The kernel must be SWP free and SWP{B} does not exist in Thumb.
 */
static struct undef_hook swp_hooks[] = {
	{
		/* Mask leaves bit 22 free so both SWP and SWPB match. */
		.instr_mask	= 0x0fb00ff0,
		.instr_val	= 0x01000090,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= swp_handler
	},
	{ }	/* sentinel: instr_mask == 0 terminates the array */
};

/* SWP{B} is obsolete: no set_hw_mode, defaults to undef (see status). */
static struct insn_emulation_ops swp_ops = {
	.name = "swp",
	.status = INSN_OBSOLETE,
	.hooks = swp_hooks,
	.set_hw_mode = NULL,
};
458
/*
 * Emulate the deprecated CP15 barrier encodings (dmb/dsb/isb issued as
 * mcr p15, 0, Rt, c7, ...) using native AArch64 barriers.
 *
 * Returns 0 when the trap was consumed; negative to treat the
 * instruction as undefined.
 */
static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a barrier instruction */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* CRm distinguishes dmb/dsb (c10) from isb (c5). */
	switch (aarch32_insn_mcr_extract_crm(instr)) {
	case 10:
		/*
		 * dmb - mcr p15, 0, Rt, c7, c10, 5
		 * dsb - mcr p15, 0, Rt, c7, c10, 4
		 */
		if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
			dmb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
		} else {
			dsb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
		}
		break;
	case 5:
		/*
		 * isb - mcr p15, 0, Rt, c7, c5, 4
		 *
		 * Taking an exception or returning from one acts as an
		 * instruction barrier. So no explicit barrier needed here.
		 */
		trace_instruction_emulation(
			"mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
		break;
	}

ret:
	pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}
511
512 static int cp15_barrier_set_hw_mode(bool enable)
513 {
514         if (enable)
515                 config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
516         else
517                 config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
518         return 0;
519 }
520
/* Undef hooks for the two CP15 barrier encodings, user ARM state only. */
static struct undef_hook cp15_barrier_hooks[] = {
	{
		/* mcr p15, 0, Rt, c7, c10, {4,5} — dsb/dmb */
		.instr_mask	= 0x0fff0fdf,
		.instr_val	= 0x0e070f9a,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{
		/* mcr p15, 0, Rt, c7, c5, 4 — isb */
		.instr_mask	= 0x0fff0fff,
		.instr_val	= 0x0e070f95,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{ }	/* sentinel */
};

/* Deprecated (not obsolete): HW mode available via SCTLR_EL1.CP15BEN. */
static struct insn_emulation_ops cp15_barrier_ops = {
	.name = "cp15_barrier",
	.status = INSN_DEPRECATED,
	.hooks = cp15_barrier_hooks,
	.set_hw_mode = cp15_barrier_set_hw_mode,
};
545
546 static int setend_set_hw_mode(bool enable)
547 {
548         if (!cpu_supports_mixed_endian_el0())
549                 return -EINVAL;
550
551         if (enable)
552                 config_sctlr_el1(SCTLR_EL1_SED, 0);
553         else
554                 config_sctlr_el1(0, SCTLR_EL1_SED);
555         return 0;
556 }
557
558 static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
559 {
560         char *insn;
561
562         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
563
564         if (big_endian) {
565                 insn = "setend be";
566                 regs->pstate |= COMPAT_PSR_E_BIT;
567         } else {
568                 insn = "setend le";
569                 regs->pstate &= ~COMPAT_PSR_E_BIT;
570         }
571
572         trace_instruction_emulation(insn, regs->pc);
573         pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
574                         current->comm, (unsigned long)current->pid, regs->pc);
575
576         return 0;
577 }
578
579 static int a32_setend_handler(struct pt_regs *regs, u32 instr)
580 {
581         int rc = compat_setend_handler(regs, (instr >> 9) & 1);
582         arm64_skip_faulting_instruction(regs, 4);
583         return rc;
584 }
585
586 static int t16_setend_handler(struct pt_regs *regs, u32 instr)
587 {
588         int rc = compat_setend_handler(regs, (instr >> 3) & 1);
589         arm64_skip_faulting_instruction(regs, 2);
590         return rc;
591 }
592
/* Undef hooks for SETEND in A32 (ARM) and T16 (Thumb) encodings. */
static struct undef_hook setend_hooks[] = {
	{
		/* ARM mode */
		.instr_mask	= 0xfffffdff,
		.instr_val	= 0xf1010000,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= a32_setend_handler,
	},
	{
		/* Thumb mode */
		.instr_mask	= 0x0000fff7,
		.instr_val	= 0x0000b650,
		.pstate_mask	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
		.pstate_val	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
		.fn		= t16_setend_handler,
	},
	{}	/* sentinel */
};

/* Deprecated: HW mode available by clearing SCTLR_EL1.SED. */
static struct insn_emulation_ops setend_ops = {
	.name = "setend",
	.status = INSN_DEPRECATED,
	.hooks = setend_hooks,
	.set_hw_mode = setend_set_hw_mode,
};
618
619 /*
620  * Invoked as late_initcall, since not needed before init spawned.
621  */
622 static int __init armv8_deprecated_init(void)
623 {
624         if (IS_ENABLED(CONFIG_SWP_EMULATION))
625                 register_insn_emulation(&swp_ops);
626
627         if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
628                 register_insn_emulation(&cp15_barrier_ops);
629
630         if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
631                 if(system_supports_mixed_endian_el0())
632                         register_insn_emulation(&setend_ops);
633                 else
634                         pr_info("setend instruction emulation is not supported on this system\n");
635         }
636
637         cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
638                                   "arm64/isndep:starting",
639                                   run_all_insn_set_hw_mode, NULL);
640         register_insn_emulation_sysctl();
641
642         return 0;
643 }
644
645 core_initcall(armv8_deprecated_init);