// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>

#include "util.h"
struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file		*shm_file;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time64_t		shm_atim;
	time64_t		shm_dtim;
	time64_t		shm_ctim;
	struct pid		*shm_cprid;
	struct pid		*shm_lprid;
	struct ucounts		*mlock_ucounts;

	/*
	 * The task that created the shm object; for locking, use
	 * task_lock(shp->shm_creator).
	 */
	struct task_struct	*shm_creator;

	/*
	 * List by creator. task_lock(->shm_creator) required for read/write.
	 * If list_empty(), then the creator is dead already.
	 */
	struct list_head	shm_clist;
	struct ipc_namespace	*ns;
} __randomize_layout;
/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000	/* segment will not be swapped */

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
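
/*
 * Note: shm_file_data() above yields an lvalue, so it is used both to read
 * and to set the per-attach descriptor that do_shmat() stores in the outer
 * file's private_data.
 */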
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}
/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	WARN_ON(ns != shp->ns);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp;

	rcu_read_lock();
	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
	if (IS_ERR(ipcp))
		goto err;

	ipc_lock_object(ipcp);
	/*
	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
	 * was spinning: here verify that the structure is still valid.
	 * Upon races with RMID, return -EIDRM, thus indicating that
	 * the ID points to a removed identifier.
	 */
	if (ipc_valid_object(ipcp)) {
		/* return a locked ipc object upon success */
		return container_of(ipcp, struct shmid_kernel, shm_perm);
	}

	ipc_unlock_object(ipcp);
	ipcp = ERR_PTR(-EIDRM);
err:
	rcu_read_unlock();
	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer and error out as appropriate.
	 */
	return ERR_CAST(ipcp);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(&shp->shm_perm);
	kfree(shp);
}

/*
 * Must be called with shp locked, and before ipc_rmid().
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
	struct task_struct *creator;

	/* ensure that shm_creator does not disappear */
	rcu_read_lock();

	/*
	 * A concurrent exit_shm may do a list_del_init() as well.
	 * Just do nothing if exit_shm already did the work.
	 */
	if (!list_empty(&shp->shm_clist)) {
		/*
		 * shp->shm_creator is guaranteed to be valid *only*
		 * if shp->shm_clist is not empty.
		 */
		creator = shp->shm_creator;

		task_lock(creator);
		/*
		 * list_del_init() is a nop if the entry was already removed
		 * from the list.
		 */
		list_del_init(&shp->shm_clist);
		task_unlock(creator);
	}
	rcu_read_unlock();
}

static inline void shm_rmid(struct shmid_kernel *s)
{
	shm_clist_rm(s);
	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
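
/*
 * Account one more attach for the segment backing @vma. Fails if the
 * segment was removed (-EIDRM) or if its ID has since been reused for a
 * different segment (-EINVAL).
 */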
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_ucounts);
	else if (shp->mlock_ucounts)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_ucounts);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (shp->ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (!list_empty(&shp->shm_clist))
		return 0;

	if (shm_may_destroy(shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}
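
/*
 * exit_shm() below walks task->sysvshm.shm_clist and unlinks every segment
 * the task created. With kernel.shm_rmid_forced set, segments that have no
 * remaining attaches are destroyed on the spot; otherwise they merely
 * become orphaned and can be reaped later by shm_destroy_orphaned().
 */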
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	for (;;) {
		struct shmid_kernel *shp;
		struct ipc_namespace *ns;

		task_lock(task);

		if (list_empty(&task->sysvshm.shm_clist)) {
			task_unlock(task);
			break;
		}

		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
				shm_clist);

		/*
		 * 1) Get pointer to the ipc namespace. It is worth to say
		 * that this pointer is guaranteed to be valid because
		 * shp lifetime is always shorter than namespace lifetime
		 * in which shp lives.
		 * We have taken task_lock, which means that shp won't be freed.
		 */
		ns = shp->ns;

		/*
		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
		 * which shmids are orphaned, so that a later set of the sysctl
		 * can clean them up.
		 */
		if (!ns->shm_rmid_forced)
			goto unlink_continue;

		/*
		 * 3) get a reference to the namespace.
		 *    The refcount could be already 0. If it is 0, then
		 *    the shm objects will be freed by free_ipc_work().
		 */
		ns = get_ipc_ns_not_zero(ns);
		if (!ns) {
unlink_continue:
			list_del_init(&shp->shm_clist);
			task_unlock(task);
			continue;
		}

		/*
		 * 4) get a reference to shp.
		 *    This cannot fail: shm_clist_rm() is called before
		 *    ipc_rmid(), thus the refcount cannot be 0.
		 */
		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

		/*
		 * 5) unlink the shm segment from the list of segments
		 *    created by current.
		 *    This must be done last. After unlinking,
		 *    only the refcounts obtained above prevent IPC_RMID
		 *    from destroying the segment or the namespace.
		 */
		list_del_init(&shp->shm_clist);

		task_unlock(task);

		/*
		 * 6) we have all references
		 *    Thus lock & if needed destroy shp.
		 */
		down_write(&shm_ids(ns).rwsem);
		shm_lock_by_ptr(shp);
		/*
		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
		 * safe to call ipc_rcu_putref here.
		 */
		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

		if (ipc_valid_object(&shp->shm_perm)) {
			if (shm_may_destroy(shp))
				shm_destroy(ns, shp);
			else
				shm_unlock(shp);
		} else {
			/*
			 * Someone else deleted the shp from namespace
			 * idr/kht while we have waited.
			 * Just unlock and continue.
			 */
			shm_unlock(shp);
		}

		up_write(&shm_ids(ns).rwsem);
		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
	}
}
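
/*
 * The vm_ops below mostly forward to the operations of the underlying
 * shmem or hugetlbfs mapping, which shm_mmap() records in shm_file_data.
 */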
static vm_fault_t shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->may_split)
		return sfd->vm_ops->may_split(vma, addr);

	return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->pagesize)
		return sfd->vm_ops->pagesize(vma);

	return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
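
/*
 * shm_release() undoes the setup done in do_shmat(): it drops the
 * namespace and base-file references held by the attach descriptor and
 * frees the descriptor itself.
 */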
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}
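
/*
 * is_file_shm_hugepages() above lets other subsystems recognise SysV shm
 * files backed by hugetlbfs without reaching into shm internals.
 */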
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.may_split = shm_may_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_ucounts = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(&shp->shm_perm);
	if (error) {
		kfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_ucounts, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = get_pid(task_tgid(current));
	shp->shm_lprid = NULL;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	shp->ns = ns;

	task_lock(current);
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
	task_unlock(current);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	if (is_file_hugepages(file) && shp->mlock_ucounts)
		user_shm_unlock(size, shp->mlock_ucounts);
	fput(file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
	return error;
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = security_shm_associate,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	return ksys_shmget(key, size, shmflg);
}
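
/*
 * Typical userspace use of the syscalls implemented in this file
 * (illustrative only; error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 42;
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */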
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
			     unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid64_ds *shmid64)
{
	struct kern_ipc_perm *ipcp;
	struct shmid_kernel *shp;
	int err;

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
				   &shmid64->shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
static int shmctl_ipc_info(struct ipc_namespace *ns,
			   struct shminfo64 *shminfo)
{
	int err = security_shm_shmctl(NULL, IPC_INFO);
	if (!err) {
		memset(shminfo, 0, sizeof(*shminfo));
		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
		shminfo->shmmax = ns->shm_ctlmax;
		shminfo->shmall = ns->shm_ctlall;
		shminfo->shmmin = SHMMIN;
		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxidx(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (err < 0)
			err = 0;
	}
	return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
			   struct shm_info *shm_info)
{
	int err = security_shm_shmctl(NULL, SHM_INFO);
	if (!err) {
		memset(shm_info, 0, sizeof(*shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info->used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
		shm_info->shm_tot = ns->shm_tot;
		shm_info->swap_attempts = 0;
		shm_info->swap_successes = 0;
		err = ipc_get_maxidx(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (err < 0)
			err = 0;
	}
	return err;
}
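
/*
 * shmctl_stat() implements IPC_STAT, SHM_STAT and SHM_STAT_ANY: it
 * snapshots the permission block, size, times, pids and attach count
 * into *tbuf under the object lock.
 */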
static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			int cmd, struct shmid64_ds *tbuf)
{
	struct shmid_kernel *shp;
	int err;

	memset(tbuf, 0, sizeof(*tbuf));

	rcu_read_lock();
	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
		shp = shm_obtain_object(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
	}

	/*
	 * Semantically SHM_STAT_ANY ought to be identical to
	 * that functionality provided by the /proc/sysvipc/
	 * interface. As such, only audit these calls and
	 * do not do traditional S_IRUGO permission checks on
	 * the ipc object.
	 */
	if (cmd == SHM_STAT_ANY)
		audit_ipc_obj(&shp->shm_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
	tbuf->shm_segsz	= shp->shm_segsz;
	tbuf->shm_atime	= shp->shm_atim;
	tbuf->shm_dtime	= shp->shm_dtim;
	tbuf->shm_ctime	= shp->shm_ctim;
#ifndef CONFIG_64BIT
	tbuf->shm_atime_high = shp->shm_atim >> 32;
	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
	tbuf->shm_nattch = shp->shm_nattch;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = shp->shm_perm.id;
	}

	ipc_unlock_object(&shp->shm_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
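
/*
 * shmctl_do_lock() handles SHM_LOCK and SHM_UNLOCK: it pins the segment's
 * pages in memory, or undoes that. Callers without CAP_IPC_LOCK must own
 * the segment and, for SHM_LOCK, have a non-zero RLIMIT_MEMLOCK.
 */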
static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
	struct shmid_kernel *shp;
	struct file *shm_file;
	int err;

	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock1;
	}

	audit_ipc_obj(&(shp->shm_perm));
	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		err = -EIDRM;
		goto out_unlock0;
	}

	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
		kuid_t euid = current_euid();

		if (!uid_eq(euid, shp->shm_perm.uid) &&
		    !uid_eq(euid, shp->shm_perm.cuid)) {
			err = -EPERM;
			goto out_unlock0;
		}
		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
			err = -EPERM;
			goto out_unlock0;
		}
	}

	shm_file = shp->shm_file;
	if (is_file_hugepages(shm_file))
		goto out_unlock0;

	if (cmd == SHM_LOCK) {
		struct ucounts *ucounts = current_ucounts();

		err = shmem_lock(shm_file, 1, ucounts);
		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
			shp->shm_perm.mode |= SHM_LOCKED;
			shp->mlock_ucounts = ucounts;
		}
		goto out_unlock0;
	}

	/* SHM_UNLOCK */
	if (!(shp->shm_perm.mode & SHM_LOCKED))
		goto out_unlock0;
	shmem_lock(shm_file, 0, shp->mlock_ucounts);
	shp->shm_perm.mode &= ~SHM_LOCKED;
	shp->mlock_ucounts = NULL;
	get_file(shm_file);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	shmem_unlock_mapping(shm_file->f_mapping);

	fput(shm_file);
	return err;

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
{
	int err;
	struct ipc_namespace *ns;
	struct shmid64_ds sem64;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO: {
		struct shminfo64 shminfo;
		err = shmctl_ipc_info(ns, &shminfo);
		if (err < 0)
			return err;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			err = -EFAULT;
		return err;
	}
	case SHM_INFO: {
		struct shm_info shm_info;
		err = shmctl_shm_info(ns, &shm_info);
		if (err < 0)
			return err;
		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
			err = -EFAULT;
		return err;
	}
	case SHM_STAT:
	case SHM_STAT_ANY:
	case IPC_STAT: {
		err = shmctl_stat(ns, shmid, cmd, &sem64);
		if (err < 0)
			return err;
		if (copy_shmid_to_user(buf, &sem64, version))
			err = -EFAULT;
		return err;
	}
	case IPC_SET:
		if (copy_shmid_from_user(&sem64, buf, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return shmctl_down(ns, shmid, cmd, &sem64);
	case SHM_LOCK:
	case SHM_UNLOCK:
		return shmctl_do_lock(ns, shmid, cmd);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	return ksys_shmctl(shmid, cmd, buf, IPC_64);
}
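
/*
 * On architectures that historically multiplexed an IPC_64/IPC_OLD version
 * flag into cmd, the "old" entry points below strip that flag out before
 * dispatching to the common implementation.
 */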
#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	int version = ipc_parse_version(&cmd);

	return ksys_shmctl(shmid, cmd, buf, version);
}

SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	return ksys_old_shmctl(shmid, cmd, buf);
}
#endif
#ifdef CONFIG_COMPAT

struct compat_shmid_ds {
	struct compat_ipc_perm shm_perm;
	int shm_segsz;
	old_time32_t shm_atime;
	old_time32_t shm_dtime;
	old_time32_t shm_ctime;
	compat_ipc_pid_t shm_cpid;
	compat_ipc_pid_t shm_lpid;
	unsigned short shm_nattch;
	unsigned short shm_unused;
	compat_uptr_t shm_unused2;
	compat_uptr_t shm_unused3;
};

struct compat_shminfo64 {
	compat_ulong_t shmmax;
	compat_ulong_t shmmin;
	compat_ulong_t shmmni;
	compat_ulong_t shmseg;
	compat_ulong_t shmall;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

struct compat_shm_info {
	compat_int_t used_ids;
	compat_ulong_t shm_tot, shm_rss, shm_swp;
	compat_ulong_t swap_attempts, swap_successes;
};
static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
					int version)
{
	if (in->shmmax > INT_MAX)
		in->shmmax = INT_MAX;
	if (version == IPC_64) {
		struct compat_shminfo64 info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	} else {
		struct shminfo info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	}
}

static int put_compat_shm_info(struct shm_info *ip,
				struct compat_shm_info __user *uip)
{
	struct compat_shm_info info;

	memset(&info, 0, sizeof(info));
	info.used_ids = ip->used_ids;
	info.shm_tot = ip->shm_tot;
	info.shm_rss = ip->shm_rss;
	info.shm_swp = ip->shm_swp;
	info.swap_attempts = ip->swap_attempts;
	info.swap_successes = ip->swap_successes;
	return copy_to_user(uip, &info, sizeof(info));
}
static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_shmid64_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
		v.shm_atime	 = lower_32_bits(in->shm_atime);
		v.shm_atime_high = upper_32_bits(in->shm_atime);
		v.shm_dtime	 = lower_32_bits(in->shm_dtime);
		v.shm_dtime_high = upper_32_bits(in->shm_dtime);
		v.shm_ctime	 = lower_32_bits(in->shm_ctime);
		v.shm_ctime_high = upper_32_bits(in->shm_ctime);
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_shmid_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
		v.shm_perm.key = in->shm_perm.key;
		v.shm_atime = in->shm_atime;
		v.shm_dtime = in->shm_dtime;
		v.shm_ctime = in->shm_ctime;
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	}
}

static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_shmid64_ds __user *p = buf;
		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
	} else {
		struct compat_shmid_ds __user *p = buf;
		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
	}
}
static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
{
	struct ipc_namespace *ns;
	struct shmid64_ds sem64;
	int err;

	ns = current->nsproxy->ipc_ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	switch (cmd) {
	case IPC_INFO: {
		struct shminfo64 shminfo;
		err = shmctl_ipc_info(ns, &shminfo);
		if (err < 0)
			return err;
		if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
			err = -EFAULT;
		return err;
	}
	case SHM_INFO: {
		struct shm_info shm_info;
		err = shmctl_shm_info(ns, &shm_info);
		if (err < 0)
			return err;
		if (put_compat_shm_info(&shm_info, uptr))
			err = -EFAULT;
		return err;
	}
	case IPC_STAT:
	case SHM_STAT_ANY:
	case SHM_STAT:
		err = shmctl_stat(ns, shmid, cmd, &sem64);
		if (err < 0)
			return err;
		if (copy_compat_shmid_to_user(uptr, &sem64, version))
			err = -EFAULT;
		return err;
	case IPC_SET:
		if (copy_compat_shmid_from_user(&sem64, uptr, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return shmctl_down(ns, shmid, cmd, &sem64);
	case SHM_LOCK:
	case SHM_UNLOCK:
		return shmctl_do_lock(ns, shmid, cmd);
	default:
		return -EINVAL;
	}
}

COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
	return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_shmctl(shmid, cmd, uptr, version);
}

COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
{
	return compat_ksys_old_shmctl(shmid, cmd, uptr);
}
#endif
#endif
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr = (unsigned long)shmaddr;
	unsigned long size;
	struct file *file, *base;
	int err;
	unsigned long flags = MAP_SHARED;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	int f_flags;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;

	if (addr) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND) {
				addr &= ~(shmlba - 1);  /* round down */

				/*
				 * Ensure that the round-down is non-nil
				 * when remapping. This can happen for
				 * cases when addr < shmlba.
				 */
				if (!addr && (shmflg & SHM_REMAP))
					goto out;
			} else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags |= MAP_FIXED;
	} else if ((shmflg & SHM_REMAP))
		goto out;

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_flags = O_RDONLY;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_flags = O_RDWR;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	/*
	 * We need to take a reference to the real shm file to prevent the
	 * pointer from becoming stale in cases where the lifetime of the outer
	 * file extends beyond that of the shm segment. It's not usually
	 * possible, but it can happen during remap_file_pages() emulation as
	 * that unmaps the memory, then does ->mmap() via file reference only.
	 * We'll deny the ->mmap() if the shm segment was since removed, but to
	 * detect shm ID reuse we need to compare the file pointers.
	 */
	base = get_file(shp->shm_file);
	shp->shm_nattch++;
	size = i_size_read(file_inode(base));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		fput(base);
		goto out_nattch;
	}

	file = alloc_file_clone(base, f_flags,
				is_file_hugepages(base) ?
					&shm_file_operations_huge :
					&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		fput(base);
		goto out_nattch;
	}

	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = base;
	sfd->vm_ops = NULL;
	file->private_data = sfd;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (mmap_write_lock_killable(current->mm)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	mmap_write_unlock(current->mm);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

#ifdef CONFIG_COMPAT

#ifndef COMPAT_SHMLBA
#define COMPAT_SHMLBA	SHMLBA
#endif

COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
#endif
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
long ksys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		vma = next;
	}

#else	/* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must be
	 * given.
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		retval = 0;
	}
#endif

	mmap_write_unlock(mm);
	return retval;
}
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	return ksys_shmdt(shmaddr);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct shmid_kernel *shp;
	unsigned long rss = 0, swp = 0;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   pid_nr_ns(shp->shm_cprid, pid_ns),
		   pid_nr_ns(shp->shm_lprid, pid_ns),
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif /* CONFIG_PROC_FS */