/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
#define SEARCH_NUMBER		5

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}
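
/*
 * Worked example of the scaling above (a sketch, assuming the usual
 * defaults MSG_MEM_SCALE == 32 and MSGMNB == 16384): on a machine with
 * 2 GiB of lowmem and a single ipc namespace,
 *
 *	allowed = (2147483648 / 32) / 16384 = 4096
 *
 * which lies between MSGMNI and IPCMNI/nr_ipc_ns, so msg_ctlmni ends
 * up allowing 4096 message queue ids for that namespace.
 */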
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}
/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/* ipc_addid() locks msq upon success. */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
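
/*
 * Note on the kill flag: ss_wakeup(h, 1) is used when the queue is being
 * torn down.  Clearing mss->list.next before the wakeup leaves a sentinel
 * that ss_del() checks, so a woken sender skips list_del() on a list that
 * belongs to a queue being freed.
 */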
static void expunge_all(struct msg_queue *msq, int res)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
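
/*
 * The NULL / wake_up_process() / smp_mb() / ERR_PTR() sequence above
 * pairs with the lockless receive path in do_msgrcv(): r_msg is parked
 * at NULL while the waker publishes its result, so a receiver that is
 * already spinning sees either the old ERR_PTR(-EAGAIN) or the final
 * value, never a half-written one.
 */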
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
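
/*
 * For reference, the userspace side of this syscall, as a minimal
 * sketch (the key and mode below are arbitrary example values):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	int id = msgget(0x1234, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("msgget");
 *
 * ipcget() routes this to newque() when the key is unbound (or is
 * IPC_PRIVATE) and through msg_security() when an existing queue is
 * found.
 */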
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rw_mutex);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
				      &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}
static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct msg_queue *msq;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;

		/*
		 * We must not return kernel stack data: because of padding,
		 * it is not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}

	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == MSG_STAT) {
			msq = msq_obtain_object(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = msq->q_perm.id;
		} else {
			msq = msq_obtain_object_check(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		rcu_read_unlock();

		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}

	default:
		return -EINVAL;
	}

	return err;
out_unlock:
	rcu_read_unlock();
	return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	int version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
		return msgctl_nolock(ns, msqid, cmd, version, buf);
	case IPC_SET:
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, buf, version);
	default:
		return -EINVAL;
	}
}
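
/*
 * A userspace sketch of the two paths above (error handling elided;
 * "id" is a queue id from msgget()).  IPC_STAT takes only the lockless
 * path, while IPC_SET goes through msgctl_down() and the rw_mutex:
 *
 *	struct msqid_ds ds;
 *
 *	msgctl(id, IPC_STAT, &ds);
 *	ds.msg_qbytes = 32768;	(raising msg_qbytes above msgmnb
 *				 requires CAP_SYS_RESOURCE)
 *	msgctl(id, IPC_SET, &ds);
 */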
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
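
/*
 * This is the wake-one fast path: a message handed directly to a
 * sleeping receiver never touches msq->q_messages, so q_cbytes and the
 * per-namespace msg_bytes/msg_hdrs accounting are not charged for it.
 * The r_msg protocol (park at NULL, wake, smp_mb(), publish) mirrors
 * expunge_all() above.
 */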
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_unlock1;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock1;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock1;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes)
			break;

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		ss_add(msq, &s);

		if (!ipc_rcu_getref(msq)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(&msq->q_perm);

		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
	}

	ipc_lock_object(&msq->q_perm);
	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the first message with the lowest type that
	 *               is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
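
/*
 * Example mappings (hypothetical msgtyp values): msgtyp == 0 gives
 * SEARCH_ANY; msgtyp == 5 gives SEARCH_EQUAL, or SEARCH_NOTEQUAL if
 * MSG_EXCEPT is set; msgtyp == -5 is rewritten to 5 and gives
 * SEARCH_LESSEQUAL, i.e. "the message with the lowest type <= 5".
 * MSG_COPY overrides all of these: msgtyp is then an index into the
 * queue rather than a type.
 */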
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to store
 * bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg, *found = NULL;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
				found = msg;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return found ?: ERR_PTR(-EAGAIN);
}
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
	       int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;
	struct msg_msg *copy = NULL;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;
	if (msgflg & MSG_COPY) {
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = find_msg(msq, &msgtyp, mode);
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}

		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
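		/*
		 * From here on, r_msg acts as a tiny state machine shared
		 * with pipelined_send() and expunge_all(): ERR_PTR(-EAGAIN)
		 * means "still queued", NULL is the transient state while a
		 * waker publishes its result, a plain pointer is a delivered
		 * message, and any other ERR_PTR (-E2BIG, -EIDRM, ...) is a
		 * final error.
		 */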
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
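
/*
 * End-to-end userspace sketch of the send/receive pair above (error
 * handling elided; queue id, type and sizes are example values):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "ping" };
 *
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *	msgrcv(id, &m, sizeof(m.mtext), 1, 0);
 *
 * If the stored message is larger than the caller's buffer, do_msgrcv()
 * fails with -E2BIG unless MSG_NOERROR is set, in which case
 * do_msg_fill() silently truncates to the buffer size.
 */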
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			from_kuid_munged(user_ns, msq->q_perm.uid),
			from_kgid_munged(user_ns, msq->q_perm.gid),
			from_kuid_munged(user_ns, msq->q_perm.cuid),
			from_kgid_munged(user_ns, msq->q_perm.cgid),
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif