/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
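
/*
 * Worked example (illustrative, not part of the original source):
 * MASK(38) expands to ((1UL << 38) - 1), i.e. bits 0-37 set.  Those 38
 * bits are exactly the frame-marker portion of cr.ifs/PT_CFM:
 * sof (7) + sol (7) + sor (4) + rrb.gr (7) + rrb.fr (7) + rrb.pr (6).
 */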
#define PTRACE_DEBUG 0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif
/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
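
/*
 * Illustrative note (an addition, not in the original source): cr.ifs
 * carries its valid bit in bit 63.  Interruption entry saves cr.ifs with
 * the valid bit set, so the saved value is negative as a signed long;
 * syscall entry stores only the low frame-marker bits, leaving bit 63
 * clear.  A stand-alone version of the same test:
 *
 *	int entered_via_syscall = ((long) cr_ifs_value >= 0);
 */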
/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}
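
/*
 * Worked example (illustrative, not in the original source): suppose
 * ia64_unat_pos(&pt->r4) were 5, i.e. r4's NaT bit sits in bit 5 of the
 * unat word.  GET_BITS(4, 7, unat) then computes dist = 5 - 4 = 1 and
 * mask = MASK(4) << 4 == 0xf0, so ia64_rotr(unat, 1) & 0xf0 moves unat
 * bits 5-8 down to bits 4-7: bit i of the result is the NaT bit of r_i.
 * PUT_BITS applies the inverse rotation, ia64_rotl(nat & 0xf0, 1), to
 * scatter the bits back into unat position.
 */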
#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6
void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
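
/*
 * Illustrative sketch (an addition, not in the original source): an
 * IA-64 instruction bundle is 16 bytes and its 5-bit template field
 * occupies bits 0-4 of the first word; the low template bit only
 * selects the trailing-stop variant, so the two functions above compare
 * bits 1-4 against IA64_MLX_TEMPLATE:
 *
 *	unsigned long w0;	/+ first 8 bytes of the bundle +/
 *	int is_mlx = (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE);
 */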
/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * | rnat   | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 *					| slot62 |
 * +- - - - +				+--------+
 *					| rnat	 |
 * +- - - - +				+--------+
 *					| slot00 | \
 * +- - - - +				+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 * +- - - - +				+--------+ |
 *					| slot02 | / <-- (kbsp)
 *					+--------+
 *	      <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
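
/*
 * Worked example (illustrative): assume shift == 61, i.e. user slot 0
 * maps to bit 61 of the kernel RNaT word at rnat0_kaddr.  Then
 * "(rnat0 & m) >> shift" supplies kernel bits 61-62 as user bits 0-1,
 * and "(rnat1 & m) << (63 - shift)" supplies bits 0-60 of the following
 * kernel RNaT word as user bits 2-62, reassembling the 63-bit user RNaT
 * value (bit 63 of an RNaT slot is ignored; see put_rnat below).
 */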
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
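
/*
 * Illustrative note (an addition): the RSE emits one NaT-collection
 * word after every 63 register slots, at addresses whose bits 3-8 are
 * all ones (offset 0x1f8 within each 0x200-byte span).  That is the
 * geometry behind ia64_rse_rnat_addr()/ia64_rse_is_rnat_slot(), e.g.:
 *
 *	int is_rnat_slot = ((((unsigned long) laddr >> 3) & 0x3f) == 0x3f);
 */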
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}
/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
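
/*
 * Worked example (illustrative; the field layout is an assumption based
 * on how pt->loadrs is used here): pt->loadrs holds an ar.rsc-formatted
 * value whose 14-bit loadrs field starts at bit 16 and measures the
 * dirty partition in bytes, so "pt->loadrs >> 19" (>> 16 to extract the
 * field, >> 3 to convert bytes to 8-byte slots) yields the number of
 * dirty slots, which ia64_rse_num_regs() converts to a register count
 * by discounting the interleaved RNaT-collection slots.
 */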
/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}
static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}
typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}
/*
 * When a thread is stopped (ptraced), a debugger may modify the
 * thread's user stack directly.  We must prevent the RSE state held in
 * the kernel from overwriting the user stack in that case (the
 * user-space copy is then newer than the kernel's).  To work around
 * this, we copy the kernel RSE to the user RSE before the task stops,
 * so the user RSE holds up-to-date data.  After the task resumes from
 * the traced stop, we copy the user RSE back to the kernel, and the
 * kernel uses the newer state to return to user mode.  TIF_RESTORE_RSE
 * is the flag indicating that the user RSE needs to be synchronized
 * back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
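
/*
 * Sketch of the resulting protocol (an illustrative summary of this
 * file's own code paths, not original text):
 *
 *	ptrace stop:	ia64_ptrace_stop()
 *			  -> sets TIF_RESTORE_RSE, set_notify_resume()
 *			  -> do_sync_rbs(ia64_sync_user_rbs)	kernel -> user
 *	debugger:	reads/writes the user RBS via access_process_vm()
 *	resume:		ia64_sync_krbs() (below)
 *			  -> do_sync_rbs(ia64_sync_kernel_rbs)	user -> kernel
 */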
/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}
/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}
/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}
static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}
void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}
long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);
	}

	return ptrace_request(child, request, addr, data);
}
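
/*
 * Illustrative user-space sketch (not part of this file) of the
 * PTRACE_PEEKUSR path handled above; PT_CR_IIP is one of the USER-area
 * offsets from <asm/ptrace_offsets.h>:
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace_offsets.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSR, pid, PT_CR_IIP, 0);
 *	if (ip == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 */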
/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}
/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}
#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}
static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}
static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
					"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
			sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
			sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}
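
/*
 * Illustrative user-space sketch (not part of this file): the regsets
 * above back the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests,
 * keyed by the same note types used in core dumps:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	unsigned long gregs[128];	(NT_PRSTATUS layout, see above)
 *	struct iovec iov = { .iov_base = gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */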
struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}
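
/*
 * Illustrative note (an assumption about callers outside this file):
 * the generic syscall_get_arguments()/syscall_set_arguments() helpers
 * in asm/syscall.h are thin wrappers around this function, e.g.:
 *
 *	ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);  (read)
 *	ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);  (write)
 */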