m32r: fix 'fix breakage from "m32r: use generic ptrace_resume code"' fallout
[platform/adaptation/renesas_rcar/renesas_kernel.git] arch/m32r/kernel/ptrace.c
/*
 * linux/arch/m32r/kernel/ptrace.c
 *
 * Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
 * Copyright (C) 2004  Hirokazu Takata, Kei Sakamoto
 *
 * Original x86 implementation:
 *      By Ross Biro 1/23/92
 *      edited by Linus Torvalds
 *
 * Some code taken from sh version:
 *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Some code taken from arm version:
 *   Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/signal.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline unsigned long int
get_stack_long(struct task_struct *task, int offset)
{
	unsigned long *stack;

	stack = (unsigned long *)task_pt_regs(task);

	return stack[offset];
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int
put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
	unsigned long *stack;

	stack = (unsigned long *)task_pt_regs(task);
	stack[offset] = data;

	return 0;
}

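/*
 * Map register numbers 0-15 as they appear in instruction encodings to
 * their word offsets within struct pt_regs.  Used below when decoding
 * compare-and-branch and jump instructions for software single-step.
 */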
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};

/*
 * Read the word at offset "off" in the "struct user" area.  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *data)
{
	unsigned long tmp;
#ifndef NO_FPU
	struct user * dummy = NULL;
#endif

	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
		__asm__ __volatile__ (
			"mvfc	%0, cr5 \n\t"
			: "=r" (tmp)
		);
		break;
	case PT_CBR: {
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			tmp = ((psw >> 8) & 1);
		}
		break;
	case PT_PSW: {
			unsigned long psw, bbpsw;
			psw = get_stack_long(tsk, PT_PSW);
			bbpsw = get_stack_long(tsk, PT_BBPSW);
			tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
		}
		break;
	case PT_PC:
		tmp = get_stack_long(tsk, PT_BPC);
		break;
	case PT_BPC:
		off = PT_BBPC;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			tmp = get_stack_long(tsk, off);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			if (!tsk_used_math(tsk)) {
				if (off == (long)(&dummy->fpu.fpscr >> 2))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)(&tsk->thread.fpu >> 2))
					[off - (long)&dummy->fpu];
		} else if (off == (long)(&dummy->u_fpvalid >> 2))
			tmp = !!tsk_used_math(tsk);
#endif /* not NO_FPU */
		else
			tmp = 0;
	}

	return put_user(tmp, data);
}

static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long data)
{
	int ret = -EIO;
#ifndef NO_FPU
	struct user * dummy = NULL;
#endif

	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
	case PT_BPC:
	case PT_SPI:
		/* evb, bpc and spi may not be modified; ignore the write. */
		ret = 0;
		break;
	case PT_PSW:
	case PT_CBR: {
			/* Only the cbr bit of psw may be modified. */
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			psw = (psw & ~0x100) | ((data & 1) << 8);
			ret = put_stack_long(tsk, PT_PSW, psw);
		}
		break;
	case PT_PC:
		off = PT_BPC;
		data &= ~1;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			ret = put_stack_long(tsk, off, data);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			set_stopped_child_used_math(tsk);
			((long *)&tsk->thread.fpu)
				[off - (long)&dummy->fpu] = data;
			ret = 0;
		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
			conditional_stopped_child_used_math(data, tsk);
			ret = 0;
		}
#endif /* not NO_FPU */
		break;
	}

	return ret;
}

/*
 * Get all user integer registers.
 */
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}

/*
 * Set all user integer registers.
 */
static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs newregs;
	int ret;

	ret = -EFAULT;
	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
		struct pt_regs *regs = task_pt_regs(tsk);
		*regs = newregs;
		ret = 0;
	}

	return ret;
}

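/*
 * Branch-condition helpers for software single-step: check_condition_bit()
 * returns the condition bit (cbr) from the child's saved PSW, and
 * check_condition_src() evaluates compare-and-branch conditions (BEQ, BNE,
 * BxxZ) against the child's saved source registers.
 */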
static inline int
check_condition_bit(struct task_struct *child)
{
	return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
}

static int
check_condition_src(unsigned long op, unsigned long regno1,
		    unsigned long regno2, struct task_struct *child)
{
	unsigned long reg1, reg2;

	reg2 = get_stack_long(child, reg_offset[regno2]);

	switch (op) {
	case 0x0: /* BEQ */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 == reg2;
	case 0x1: /* BNE */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 != reg2;
	case 0x8: /* BEQZ */
		return reg2 == 0;
	case 0x9: /* BNEZ */
		return reg2 != 0;
	case 0xa: /* BLTZ */
		return (int)reg2 < 0;
	case 0xb: /* BGEZ */
		return (int)reg2 >= 0;
	case 0xc: /* BLEZ */
		return (int)reg2 <= 0;
	case 0xd: /* BGTZ */
		return (int)reg2 > 0;
	default:
		/* never reached */
		return 0;
	}
}

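/*
 * Compute the address of the instruction executed after the 16-bit
 * instruction at "pc".  Two 16-bit instructions are packed into one 32-bit
 * word: (pc & 3) selects the right slot, otherwise the left slot is used,
 * and bit 15 of the fetched word marks the two halves as a parallel pair,
 * in which case the PC advances by 4 rather than 2 when no branch is
 * taken.  Branch conditions are evaluated right now against the child's
 * saved condition bit and registers.
 */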
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	if (insn & 0x00008000)
		parallel = 1;
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */

	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;

	if (op == 0x7) {
		switch (op2) {
		case 0xd: /* BNC */
		case 0x9: /* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0x1) {
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) { /* TRAP */
#if 1
				/* pass through */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;
				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					:"=r"(evb)
					:
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) { /* RTE */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc: /* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd: /* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe: /* JL */
		case 0xf: /* JMP */
			if (op3 == 0xc) { /* JMP */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}

static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) {	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) {	/* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0: /* BEQ */
		case 0x1: /* BNE */
		case 0x8: /* BEQZ */
		case 0x9: /* BNEZ */
		case 0xa: /* BLTZ */
		case 0xb: /* BGEZ */
		case 0xc: /* BLEZ */
		case 0xd: /* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	*next_pc = pc + 4;
}

static inline void
compute_next_pc(unsigned long insn, unsigned long pc,
		unsigned long *next_pc, struct task_struct *child)
{
	if (insn & 0x80000000)
		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
	else
		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}

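/*
 * Software breakpoint bookkeeping for single-step.  register_debug_trap()
 * records the original instruction word at next_pc in
 * child->thread.debug_trap and builds a replacement word with a TRAP1
 * opcode (0x10f1) in the slot that will be executed; the other slot is
 * preserved, or replaced by a NOP when the original word held a 32-bit or
 * parallel instruction.  unregister_debug_trap() restores the saved
 * instruction for one address, unregister_all_debug_traps() for all of
 * them.
 */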
static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
	unsigned long next_insn, unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	unsigned long addr = next_pc & ~3;

	if (p->nr_trap == MAX_TRAPS) {
		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
					__FILE__, __LINE__, p->nr_trap);
		return -1;
	}
	p->addr[p->nr_trap] = addr;
	p->insn[p->nr_trap] = next_insn;
	p->nr_trap++;
	if (next_pc & 3) {
		*code = (next_insn & 0xffff0000) | 0x10f1;
		/* xxx --> TRAP1 */
	} else {
		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
			*code = 0x10f17000;
			/* TRAP1 --> NOP */
		} else {
			*code = (next_insn & 0xffff) | 0x10f10000;
			/* TRAP1 --> xxx */
		}
	}
	return 0;
}

static int
unregister_debug_trap(struct task_struct *child, unsigned long addr,
		      unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	/* Search for the debug trap entry. */
	for (i = 0; i < p->nr_trap; i++) {
		if (p->addr[i] == addr)
			break;
	}
	if (i >= p->nr_trap) {
		/* The trap may have been requested by the debugger itself;
		 * ptrace should do nothing in this case.
		 */
		return 0;
	}

	/* Recover the original instruction code. */
	*code = p->insn[i];

	/* Shift the remaining debug trap entries down. */
	while (i < p->nr_trap - 1) {
		p->insn[i] = p->insn[i + 1];
		p->addr[i] = p->addr[i + 1];
		i++;
	}
	p->nr_trap--;
	return 1;
}

static void
unregister_all_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	for (i = 0; i < p->nr_trap; i++)
		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
	p->nr_trap = 0;
}

static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

	_flush_cache_copyback_all();

#else	/* ! CONFIG_CHIP_M32700 */

	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi	r0, #-1				\n\t"
		"ldi	r1, #0				\n\t"
		"stb	r1, @r0		; cache off		\n\t"
		";					\n\t"
		"ldi	r0, #-2				\n\t"
		"ldi	r1, #1				\n\t"
		"stb	r1, @r0		; cache invalidate	\n\t"
		".fillinsn				\n"
		"0:					\n\t"
		"ldb	r1, @r0		; invalidate check	\n\t"
		"bnez	r1, 0b				\n\t"
		";					\n\t"
		"ldi	r0, #-1				\n\t"
		"ldi	r1, #1				\n\t"
		"stb	r1, @r0		; cache on		\n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif	/* CONFIG_CHIP_M32700 */
}

/* Embed a debug trap (TRAP1) code */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
	unsigned long next_insn, code;
	unsigned long addr = next_pc & ~3;

	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
	    != sizeof(next_insn)) {
		return -1; /* error */
	}

	/* Set a trap code. */
	if (register_debug_trap(child, next_pc, next_insn, &code)) {
		return -1; /* error */
	}
	if (access_process_vm(child, addr, &code, sizeof(code), 1)
	    != sizeof(code)) {
		return -1; /* error */
	}
	return 0; /* success */
}

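/*
 * Undo a planted TRAP1 once it has fired: step the BPC back over the
 * 2-byte trap, restore the saved instruction word, and invalidate the
 * cache so the original code is executed on resume.
 */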
void
withdraw_debug_trap(struct pt_regs *regs)
{
	unsigned long addr;
	unsigned long code;

	addr = (regs->bpc - 2) & ~3;
	regs->bpc -= 2;
	if (unregister_debug_trap(current, addr, &code)) {
		access_process_vm(current, addr, &code, sizeof(code), 1);
		invalidate_cache();
	}
}

void
init_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;
	p->nr_trap = 0;
	for (i = 0; i < MAX_TRAPS; i++) {
		p->addr[i] = 0;
		p->insn[i] = 0;
	}
}

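/*
 * Single-stepping is implemented in software here: fetch the instruction
 * word at the child's BPC, compute the next PC (evaluating any branch
 * condition against the child's saved state), and plant a TRAP1 there with
 * embed_debug_trap().  Addresses with bit 31 set (kernel space) are left
 * alone.
 */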
void user_enable_single_step(struct task_struct *child)
{
	unsigned long next_pc;
	unsigned long pc, insn;

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	/* Compute next pc.  */
	pc = get_stack_long(child, PT_BPC);

	if (access_process_vm(child, pc & ~3, &insn, sizeof(insn), 0)
	    != sizeof(insn))
		return;

	compute_next_pc(insn, pc, &next_pc, child);
	if (next_pc & 0x80000000)
		return;

	if (embed_debug_trap(child, next_pc))
		return;

	invalidate_cache();
}

void user_disable_single_step(struct task_struct *child)
{
	unregister_all_debug_traps(child);
	invalidate_cache();
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}

long
arch_ptrace(struct task_struct *child, long request,
	    unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	/*
	 * read word at location "addr" in the child process.
	 */
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/*
	 * read the word at location addr in the USER area.
	 */
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	/*
	 * write the word at location addr.
	 */
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		if (ret == 0 && request == PTRACE_POKETEXT)
			invalidate_cache();
		break;

	/*
	 * write the word at location addr in the USER area.
	 */
	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

/* notification of system call entry/exit
 * - triggered by the TIF_SYSCALL_TRACE thread flag
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
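
/*
 * Usage sketch: a tracer in user space drives the handlers above through
 * the ptrace(2) requests dispatched by arch_ptrace().  Assuming the PT_*
 * offsets and struct pt_regs from <asm/ptrace.h> are exported to user
 * space, reading the child's PC and its whole register set might look
 * like this:
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>
 *
 *	long pc = ptrace(PTRACE_PEEKUSR, pid, (void *)(PT_PC << 2), NULL);
 *	struct pt_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *
 * PTRACE_PEEKUSR takes a byte offset (ptrace_read_user() shifts it down
 * by 2, hence PT_PC << 2), and PTRACE_GETREGS copies the whole
 * struct pt_regs via ptrace_getregs().
 */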