/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"
/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
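/* For reference, plugging in the macros above: the rejected window is
 * [0x000007ff00000000, 0xfffff80100000000), i.e. the hardware hole
 * plus a 4GB guard band on either side.
 */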
/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;
	return base - off;
}
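/* Illustration (SHMLBA value assumed for the example, not taken from
 * this file): with 8KB pages and a 16KB SHMLBA, a shared mapping at
 * page offset pgoff must land on an address whose position within the
 * 16KB colour window equals (pgoff << PAGE_SHIFT) & (SHMLBA - 1), so
 * every user alias of a page indexes the same D-cache colour.
 */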
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
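/* The free_area_cache / cached_hole_size pair above memoizes the scan:
 * the cache records where the previous search left off and the hole
 * size records the largest gap seen, so later allocations only restart
 * from TASK_UNMAPPED_BASE when a sufficiently large hole may have been
 * skipped.
 */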
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;
	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);
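	/* The loop below over-allocates by (align_goal - PAGE_SIZE): any
	 * page-aligned region of that padded length necessarily contains
	 * an align_goal-aligned sub-region of the requested length, which
	 * the rounding after a successful get_area() call then selects.
	 */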
	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}
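/* After the shift this yields up to 2^23 bytes (an 8MB window) of
 * base randomization for 32-bit tasks and up to 2^30 bytes (a 1GB
 * window) for 64-bit tasks.
 */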
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
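/* Example with the common 8MB stack rlimit: gap is clamped up to
 * 128MB, so a 32-bit task ends up with
 * mmap_base = PAGE_ALIGN(STACK_TOP32 - 128MB - random_factor)
 * and allocates top-down from there.
 */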
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}
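/* The SPARC pipe(2) ABI returns both descriptors in registers: fd[0]
 * comes back as the syscall return value in %o0 and fd[1] in %o1 via
 * UREG_I1 above, rather than being stored through a user pointer.
 */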
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
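/* The range checks below rely on the call numbers in <linux/ipc.h>
 * being grouped in ascending blocks: SEMOP..SEMTIMEDOP (1-4), then
 * MSGSND..MSGCTL (11-14), then SHMAT..SHMCTL (21-24).
 */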
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
					     (const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL:
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (union semun) ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}
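/* Keeping PER_LINUX32 set in the task while reporting plain PER_LINUX
 * back to the caller lets a 32-bit process request the native
 * personality without losing the compat setup its children rely on
 * across exec.
 */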
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}
/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}
SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);
SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable, if someone goes stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}
/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}
extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
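/* utraps[0] doubles as a reference count: the table is shared by
 * clone()d threads, so when another thread still holds a reference a
 * private copy is made above before this thread's handler is changed.
 */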
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
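/* The accepted models are the SPARC V9 memory-model encodings: 0 (TSO),
 * 1 (PSO) and 2 (RMO); the shift by 14 places the value in the MM
 * field of TSTATE, hence the model >= 3 rejection above.
 */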
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
asmlinkage long sys_kern_features(void)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}
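/* Advertises to user space that this kernel copes with mixed 32-bit
 * and 64-bit stack frames within one task; see the definition of
 * KERN_FEATURE_MIXED_MODE_STACK for the authoritative meaning of the
 * bit.
 */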