help
Support for NXP Semiconductors STB225 Development Board.
- config PNX8550_JBS
- bool "NXP PNX8550 based JBS board"
- select PNX8550
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
- config PNX8550_STB810
- bool "NXP PNX8550 based STB810 board"
- select PNX8550
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
config PMC_MSP
bool "PMC-Sierra MSP chipsets"
- depends on EXPERIMENTAL
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incomming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incomming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incomming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incomming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
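For context (not part of the patch): a minimal sketch of how these RED helpers might be called, assuming the Octeon executive header path below; the threshold values are illustrative assumptions, not recommendations.

#include <asm/octeon/cvmx-helper-util.h>

/* Sketch only: enable RED globally, then give input queue 0 its own
 * thresholds. All numeric values here are assumptions for illustration. */
static int example_enable_red(void)
{
	int ret;

	/* Begin dropping slowly below 256 free FPA pool 0 buffers,
	 * drop everything below 128 free buffers. */
	ret = cvmx_helper_setup_red(256, 128);
	if (ret)
		return ret;

	/* Per-queue variant; valid input queues are 0-7. */
	return cvmx_helper_setup_red_queue(0, 256, 128);
}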
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
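Aside (not part of the patch): a minimal sketch of registering a return probe, to show the API whose per-instance bookkeeping the comment above describes; the probed symbol name is an assumption chosen for illustration.

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* Runs when the probed function returns; $v0 is regs->regs[2] on MIPS. */
	pr_info("probed function returned %lu\n", regs->regs[2]);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumed target, for illustration */
	.maxactive	= 20,		/* concurrent instances to allocate */
};

static int __init example_init(void)
{
	return register_kretprobe(&example_rp);
}
module_init(example_init);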
unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
- merge_64(len_a4, len_a5));
+ merge_64(len_a4, len_a5));
}
-save_static_function(sys32_clone);
-static int noinline __used
-_sys32_clone(nabi_no_regargs struct pt_regs regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs.regs[4];
- newsp = regs.regs[5];
- if (!newsp)
- newsp = regs.regs[29];
- parent_tidptr = (int __user *) regs.regs[6];
-
- /* Use __dummy4 instead of getting it off the stack, so that
- syscall() works. */
- child_tidptr = (int __user *) __dummy4;
- return do_fork(clone_flags, newsp, 0,
- parent_tidptr, child_tidptr);
-}
-
asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
size_t len)
{
return 0;
}
*childregs = *regs;
- childregs->regs[7] = 0; /* Clear error flag */
- childregs->regs[2] = 0; /* Child gets zero as return value */
+ childregs->regs[7] = 0; /* Clear error flag */
+ childregs->regs[2] = 0; /* Child gets zero as return value */
- childregs->regs[29] = usp;
+ if (usp)
+ childregs->regs[29] = usp;
ti->addr_limit = USER_DS;
p->thread.reg29 = (unsigned long) childregs;
PTR sys_sched_yield
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
- PTR sys_32_sched_rr_get_interval /* 4165 */
+ PTR compat_sys_sched_rr_get_interval /* 4165 */
PTR compat_sys_nanosleep
PTR sys_mremap
PTR sys_accept
PTR sys_getresgid
PTR sys_prctl
PTR sys32_rt_sigreturn
- PTR sys_32_rt_sigaction
- PTR sys_32_rt_sigprocmask /* 4195 */
- PTR sys_32_rt_sigpending
+ PTR compat_sys_rt_sigaction
+ PTR compat_sys_rt_sigprocmask /* 4195 */
+ PTR compat_sys_rt_sigpending
PTR compat_sys_rt_sigtimedwait
- PTR sys_32_rt_sigqueueinfo
- PTR sys32_rt_sigsuspend
+ PTR compat_sys_rt_sigqueueinfo
+ PTR compat_sys_rt_sigsuspend
PTR sys_32_pread /* 4200 */
PTR sys_32_pwrite
PTR sys_chown
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
- err |= __put_user((void __user *)current->sas_ss_sp,
- &frame->rs_uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[29]),
- &frame->rs_uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size,
- &frame->rs_uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
-struct sigaction32 {
- unsigned int sa_flags;
- __sighandler32_t sa_handler;
- compat_sigset_t sa_mask;
-};
-
-/* IRIX compatible stack_t */
-typedef struct sigaltstack32 {
- s32 ss_sp;
- compat_size_t ss_size;
- int ss_flags;
-} stack32_t;
-
struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
+ u32 uc_flags;
+ s32 uc_link;
- stack32_t uc_stack;
+ compat_stack_t uc_stack;
struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct sigframe32 {
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
- sp = (int) (long) current->sas_ss_sp;
- err |= __put_user(sp,
- &frame->rs_uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[29]),
- &frame->rs_uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size,
- &frame->rs_uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
-
-/* IRIX compatible stack_t */
-typedef struct sigaltstack32 {
- s32 ss_sp;
- compat_size_t ss_size;
- int ss_flags;
-} stack32_t;
-
struct ucontextn32 {
- u32 uc_flags;
- s32 uc_link;
+ u32 uc_flags;
+ s32 uc_link;
- stack32_t uc_stack;
+ compat_stack_t uc_stack;
struct sigcontext uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe_n32 {
/* Create siginfo. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
- sp = (int) (long) current->sas_ss_sp;
- err |= __put_user(sp,
- &frame->rs_uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[29]),
- &frame->rs_uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size,
- &frame->rs_uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
/*
* For historic reasons the pipe(2) syscall on MIPS has an unusual calling
- * convention. It returns results in registers $v0 / $v1 which means there
+ * convention. It returns results in registers $v0 / $v1 which means there
* is no need for it to verify the validity of a userspace pointer
- * argument. Historically that used to be expensive in Linux. These days
+ * argument. Historically that used to be expensive in Linux. These days
* the performance advantage is negligible.
*/
-asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
+asmlinkage int sysm_pipe(void)
{
int fd[2];
- int error, res;
-
- error = do_pipe_flags(fd, 0);
- if (error) {
- res = error;
- goto out;
- }
- regs.regs[3] = fd[1];
- res = fd[0];
-out:
- return res;
+ int error = do_pipe_flags(fd, 0);
+ if (error)
+ return error;
+ current_pt_regs()->regs[3] = fd[1];
+ return fd[0];
}
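For illustration (not part of the patch): a userspace-side sketch of the $v0/$v1 convention described above, assuming the o32 ABI (__NR_pipe = 4042) and the usual MIPS error-flag convention ($a3 non-zero on failure); the clobber list follows common libc practice.

/* Sketch only: a libc-style wrapper consuming both result registers. */
static int pipe_o32(int fds[2])
{
	register long v0 asm("$2") = 4042;	/* assumed o32 __NR_pipe */
	register long v1 asm("$3");		/* second fd returned in $v1 */
	register long a3 asm("$7");		/* error flag returned in $a3 */

	asm volatile("syscall"
		     : "+r" (v0), "=r" (v1), "=r" (a3)
		     :
		     : "$1", "$8", "$9", "$10", "$11", "$12", "$13",
		       "$14", "$15", "$24", "$25", "hi", "lo", "memory");
	if (a3)
		return -1;	/* $v0 holds the error code in this case */
	fds[0] = v0;		/* first descriptor in $v0 */
	fds[1] = v1;		/* second descriptor in $v1 */
	return 0;
}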
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,