1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
5 * Many improvements/fixes by Bruno Haible.
6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
22 * Pavel Emelianov <xemul@openvz.org>
24 * Better ipc lock (kern_ipc_perm.lock) handling
25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
28 #include <linux/slab.h>
30 #include <linux/hugetlb.h>
31 #include <linux/shm.h>
32 #include <linux/init.h>
33 #include <linux/file.h>
34 #include <linux/mman.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/security.h>
37 #include <linux/syscalls.h>
38 #include <linux/audit.h>
39 #include <linux/capability.h>
40 #include <linux/ptrace.h>
41 #include <linux/seq_file.h>
42 #include <linux/rwsem.h>
43 #include <linux/nsproxy.h>
44 #include <linux/mount.h>
45 #include <linux/ipc_namespace.h>
46 #include <linux/rhashtable.h>
48 #include <linux/uaccess.h>
52 struct shmid_kernel /* private to the kernel */
54 struct kern_ipc_perm shm_perm;
55 struct file *shm_file;
56 unsigned long shm_nattch;
57 unsigned long shm_segsz;
61 struct pid *shm_cprid;
62 struct pid *shm_lprid;
63 struct ucounts *mlock_ucounts;
* The task that created the shm object; used as the target of
* task_lock(shp->shm_creator).
69 struct task_struct *shm_creator;
72 * List by creator. task_lock(->shm_creator) required for read/write.
73 * If list_empty(), then the creator is dead already.
75 struct list_head shm_clist;
76 struct ipc_namespace *ns;
79 /* shm_mode upper byte flags */
80 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
81 #define SHM_LOCKED 02000 /* segment will not be swapped */
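/*
 * For illustration only (not part of the kernel build): a minimal userspace
 * sketch of how SHM_DEST comes about. shmctl(IPC_RMID) on a segment that
 * still has attachments marks it destroy-on-last-detach rather than freeing
 * it immediately. All values below are hypothetical.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	(segment now carries SHM_DEST)
 *	shmdt(p);			(last detach frees the segment)
 */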
83 struct shm_file_data {
85 struct ipc_namespace *ns;
87 const struct vm_operations_struct *vm_ops;
90 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
92 static const struct file_operations shm_file_operations;
93 static const struct vm_operations_struct shm_vm_ops;
95 #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
97 #define shm_unlock(shp) \
98 ipc_unlock(&(shp)->shm_perm)
100 static int newseg(struct ipc_namespace *, struct ipc_params *);
101 static void shm_open(struct vm_area_struct *vma);
102 static void shm_close(struct vm_area_struct *vma);
103 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
104 #ifdef CONFIG_PROC_FS
105 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
108 void shm_init_ns(struct ipc_namespace *ns)
110 ns->shm_ctlmax = SHMMAX;
111 ns->shm_ctlall = SHMALL;
112 ns->shm_ctlmni = SHMMNI;
113 ns->shm_rmid_forced = 0;
115 ipc_init_ids(&shm_ids(ns));
119 * Called with shm_ids.rwsem (writer) and the shp structure locked.
120 * Only shm_ids.rwsem remains locked on exit.
122 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
124 struct shmid_kernel *shp;
126 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
127 WARN_ON(ns != shp->ns);
129 if (shp->shm_nattch) {
130 shp->shm_perm.mode |= SHM_DEST;
131 /* Do not find it any more */
132 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
135 shm_destroy(ns, shp);
139 void shm_exit_ns(struct ipc_namespace *ns)
141 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
142 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
143 rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
147 static int __init ipc_ns_init(void)
149 shm_init_ns(&init_ipc_ns);
153 pure_initcall(ipc_ns_init);
155 void __init shm_init(void)
157 ipc_init_proc_interface("sysvipc/shm",
158 #if BITS_PER_LONG <= 32
159 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
161 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
163 IPC_SHM_IDS, sysvipc_shm_proc_show);
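/*
 * For illustration only: the proc interface registered above can be read
 * like any other file; a minimal userspace sketch (error handling omitted):
 *
 *	FILE *f = fopen("/proc/sysvipc/shm", "r");
 *	char line[512];
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);	(one row per segment, header as above)
 *	if (f)
 *		fclose(f);
 */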
166 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
168 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
171 return ERR_CAST(ipcp);
173 return container_of(ipcp, struct shmid_kernel, shm_perm);
176 static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
178 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
181 return ERR_CAST(ipcp);
183 return container_of(ipcp, struct shmid_kernel, shm_perm);
187 * shm_lock_(check_) routines are called in the paths where the rwsem
188 * is not necessarily held.
190 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
192 struct kern_ipc_perm *ipcp;
195 ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
199 ipc_lock_object(ipcp);
201 * ipc_rmid() may have already freed the ID while ipc_lock_object()
202 * was spinning: here verify that the structure is still valid.
203 * Upon races with RMID, return -EIDRM, thus indicating that
204 * the ID points to a removed identifier.
206 if (ipc_valid_object(ipcp)) {
207 /* return a locked ipc object upon success */
208 return container_of(ipcp, struct shmid_kernel, shm_perm);
211 ipc_unlock_object(ipcp);
212 ipcp = ERR_PTR(-EIDRM);
216 * Callers of shm_lock() must validate the status of the returned ipc
217 * object pointer and error out as appropriate.
219 return ERR_CAST(ipcp);
222 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
225 ipc_lock_object(&ipcp->shm_perm);
228 static void shm_rcu_free(struct rcu_head *head)
230 struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
232 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
234 security_shm_free(&shp->shm_perm);
* It has to be called with shp locked.
* It must be called before ipc_rmid().
242 static inline void shm_clist_rm(struct shmid_kernel *shp)
244 struct task_struct *creator;
246 /* ensure that shm_creator does not disappear */
250 * A concurrent exit_shm may do a list_del_init() as well.
251 * Just do nothing if exit_shm already did the work
253 if (!list_empty(&shp->shm_clist)) {
255 * shp->shm_creator is guaranteed to be valid *only*
256 * if shp->shm_clist is not empty.
258 creator = shp->shm_creator;
262 * list_del_init() is a nop if the entry was already removed
265 list_del_init(&shp->shm_clist);
266 task_unlock(creator);
271 static inline void shm_rmid(struct shmid_kernel *s)
274 ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
278 static int __shm_open(struct vm_area_struct *vma)
280 struct file *file = vma->vm_file;
281 struct shm_file_data *sfd = shm_file_data(file);
282 struct shmid_kernel *shp;
284 shp = shm_lock(sfd->ns, sfd->id);
289 if (shp->shm_file != sfd->file) {
295 shp->shm_atim = ktime_get_real_seconds();
296 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
302 /* This is called by fork, once for every shm attach. */
303 static void shm_open(struct vm_area_struct *vma)
305 int err = __shm_open(vma);
307 * We raced in the idr lookup or with shm_destroy().
308 * Either way, the ID is busted.
* shm_destroy - free the struct shmid_kernel
*
* @ns: namespace
* @shp: struct to free
319 * It has to be called with shp and shm_ids.rwsem (writer) locked,
320 * but returns with shp unlocked and freed.
322 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
324 struct file *shm_file;
326 shm_file = shp->shm_file;
327 shp->shm_file = NULL;
328 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
331 if (!is_file_hugepages(shm_file))
332 shmem_lock(shm_file, 0, shp->mlock_ucounts);
334 ipc_update_pid(&shp->shm_cprid, NULL);
335 ipc_update_pid(&shp->shm_lprid, NULL);
336 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
340 * shm_may_destroy - identifies whether shm segment should be destroyed now
342 * Returns true if and only if there are no active users of the segment and
343 * one of the following is true:
345 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
347 * 2) sysctl kernel.shm_rmid_forced is set to 1.
349 static bool shm_may_destroy(struct shmid_kernel *shp)
351 return (shp->shm_nattch == 0) &&
352 (shp->ns->shm_rmid_forced ||
353 (shp->shm_perm.mode & SHM_DEST));
357 * remove the attach descriptor vma.
358 * free memory for segment if it is marked destroyed.
359 * The descriptor has already been removed from the current->mm->mmap list
360 * and will later be kfree()d.
362 static void shm_close(struct vm_area_struct *vma)
364 struct file *file = vma->vm_file;
365 struct shm_file_data *sfd = shm_file_data(file);
366 struct shmid_kernel *shp;
367 struct ipc_namespace *ns = sfd->ns;
369 down_write(&shm_ids(ns).rwsem);
370 /* remove from the list of attaches of the shm segment */
371 shp = shm_lock(ns, sfd->id);
374 * We raced in the idr lookup or with shm_destroy().
375 * Either way, the ID is busted.
377 if (WARN_ON_ONCE(IS_ERR(shp)))
378 goto done; /* no-op */
380 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
381 shp->shm_dtim = ktime_get_real_seconds();
383 if (shm_may_destroy(shp))
384 shm_destroy(ns, shp);
388 up_write(&shm_ids(ns).rwsem);
391 /* Called with ns->shm_ids(ns).rwsem locked */
392 static int shm_try_destroy_orphaned(int id, void *p, void *data)
394 struct ipc_namespace *ns = data;
395 struct kern_ipc_perm *ipcp = p;
396 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
* We want to destroy segments without users and whose
* originating process has already exited.
402 * As shp->* are changed under rwsem, it's safe to skip shp locking.
404 if (!list_empty(&shp->shm_clist))
407 if (shm_may_destroy(shp)) {
408 shm_lock_by_ptr(shp);
409 shm_destroy(ns, shp);
414 void shm_destroy_orphaned(struct ipc_namespace *ns)
416 down_write(&shm_ids(ns).rwsem);
417 if (shm_ids(ns).in_use)
418 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
419 up_write(&shm_ids(ns).rwsem);
422 /* Locking assumes this will only be called with task == current */
423 void exit_shm(struct task_struct *task)
426 struct shmid_kernel *shp;
427 struct ipc_namespace *ns;
431 if (list_empty(&task->sysvshm.shm_clist)) {
436 shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
* 1) Get a pointer to the ipc namespace. This pointer is
* guaranteed to be valid because the lifetime of shp is always
* shorter than the lifetime of the namespace it lives in.
* Since we hold task_lock(), shp cannot be freed under us.
* 2) If kernel.shm_rmid_forced is not set, then only keep track of
* which shmids are orphaned, so that a later write to the sysctl
* can clean them up.
453 if (!ns->shm_rmid_forced)
454 goto unlink_continue;
457 * 3) get a reference to the namespace.
* The refcount could already be 0. If it is 0, then
* the shm objects will be freed by free_ipc_work().
461 ns = get_ipc_ns_not_zero(ns);
464 list_del_init(&shp->shm_clist);
470 * 4) get a reference to shp.
471 * This cannot fail: shm_clist_rm() is called before
472 * ipc_rmid(), thus the refcount cannot be 0.
474 WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
477 * 5) unlink the shm segment from the list of segments
478 * created by current.
479 * This must be done last. After unlinking,
480 * only the refcounts obtained above prevent IPC_RMID
481 * from destroying the segment or the namespace.
483 list_del_init(&shp->shm_clist);
488 * 6) we have all references
489 * Thus lock & if needed destroy shp.
491 down_write(&shm_ids(ns).rwsem);
492 shm_lock_by_ptr(shp);
* rcu_read_lock was implicitly taken in shm_lock_by_ptr, so it's
* safe to call ipc_rcu_putref here.
497 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
499 if (ipc_valid_object(&shp->shm_perm)) {
500 if (shm_may_destroy(shp))
501 shm_destroy(ns, shp);
* Someone else deleted the shp from the namespace
* idr/kht while we were waiting.
508 * Just unlock and continue.
513 up_write(&shm_ids(ns).rwsem);
514 put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
518 static vm_fault_t shm_fault(struct vm_fault *vmf)
520 struct file *file = vmf->vma->vm_file;
521 struct shm_file_data *sfd = shm_file_data(file);
523 return sfd->vm_ops->fault(vmf);
526 static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
528 struct file *file = vma->vm_file;
529 struct shm_file_data *sfd = shm_file_data(file);
531 if (sfd->vm_ops->may_split)
532 return sfd->vm_ops->may_split(vma, addr);
537 static unsigned long shm_pagesize(struct vm_area_struct *vma)
539 struct file *file = vma->vm_file;
540 struct shm_file_data *sfd = shm_file_data(file);
542 if (sfd->vm_ops->pagesize)
543 return sfd->vm_ops->pagesize(vma);
549 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
551 struct file *file = vma->vm_file;
552 struct shm_file_data *sfd = shm_file_data(file);
555 if (sfd->vm_ops->set_policy)
556 err = sfd->vm_ops->set_policy(vma, new);
560 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
563 struct file *file = vma->vm_file;
564 struct shm_file_data *sfd = shm_file_data(file);
565 struct mempolicy *pol = NULL;
567 if (sfd->vm_ops->get_policy)
568 pol = sfd->vm_ops->get_policy(vma, addr);
569 else if (vma->vm_policy)
570 pol = vma->vm_policy;
576 static int shm_mmap(struct file *file, struct vm_area_struct *vma)
578 struct shm_file_data *sfd = shm_file_data(file);
582 * In case of remap_file_pages() emulation, the file can represent an
583 * IPC ID that was removed, and possibly even reused by another shm
584 * segment already. Propagate this case as an error to caller.
586 ret = __shm_open(vma);
590 ret = call_mmap(sfd->file, vma);
595 sfd->vm_ops = vma->vm_ops;
597 WARN_ON(!sfd->vm_ops->fault);
599 vma->vm_ops = &shm_vm_ops;
603 static int shm_release(struct inode *ino, struct file *file)
605 struct shm_file_data *sfd = shm_file_data(file);
609 shm_file_data(file) = NULL;
614 static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
616 struct shm_file_data *sfd = shm_file_data(file);
618 if (!sfd->file->f_op->fsync)
620 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
623 static long shm_fallocate(struct file *file, int mode, loff_t offset,
626 struct shm_file_data *sfd = shm_file_data(file);
628 if (!sfd->file->f_op->fallocate)
630 return sfd->file->f_op->fallocate(file, mode, offset, len);
633 static unsigned long shm_get_unmapped_area(struct file *file,
634 unsigned long addr, unsigned long len, unsigned long pgoff,
637 struct shm_file_data *sfd = shm_file_data(file);
639 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
643 static const struct file_operations shm_file_operations = {
646 .release = shm_release,
647 .get_unmapped_area = shm_get_unmapped_area,
648 .llseek = noop_llseek,
649 .fallocate = shm_fallocate,
653 * shm_file_operations_huge is now identical to shm_file_operations,
654 * but we keep it distinct for the sake of is_file_shm_hugepages().
656 static const struct file_operations shm_file_operations_huge = {
659 .release = shm_release,
660 .get_unmapped_area = shm_get_unmapped_area,
661 .llseek = noop_llseek,
662 .fallocate = shm_fallocate,
665 bool is_file_shm_hugepages(struct file *file)
667 return file->f_op == &shm_file_operations_huge;
670 static const struct vm_operations_struct shm_vm_ops = {
671 .open = shm_open, /* callback for a new vm-area open */
672 .close = shm_close, /* callback for when the vm-area is released */
674 .may_split = shm_may_split,
675 .pagesize = shm_pagesize,
676 #if defined(CONFIG_NUMA)
677 .set_policy = shm_set_policy,
678 .get_policy = shm_get_policy,
* newseg - Create a new shared memory segment
* @ns: namespace
* @params: ptr to the structure that contains key, size and shmflg
687 * Called with shm_ids.rwsem held as a writer.
689 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
691 key_t key = params->key;
692 int shmflg = params->flg;
693 size_t size = params->u.size;
695 struct shmid_kernel *shp;
696 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
699 vm_flags_t acctflag = 0;
701 if (size < SHMMIN || size > ns->shm_ctlmax)
704 if (numpages << PAGE_SHIFT < size)
707 if (ns->shm_tot + numpages < ns->shm_tot ||
708 ns->shm_tot + numpages > ns->shm_ctlall)
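/*
 * Worked example for the checks above (illustrative numbers, assuming
 * 4 KiB pages): a request of size 5000 rounds up to numpages = 2.
 * The "numpages << PAGE_SHIFT < size" test rejects sizes so large that
 * the page rounding wrapped around, and "ns->shm_tot + numpages < ns->shm_tot"
 * catches wraparound of the global page count before it is compared
 * against shm_ctlall.
 */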
711 shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
715 shp->shm_perm.key = key;
716 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
717 shp->mlock_ucounts = NULL;
719 shp->shm_perm.security = NULL;
720 error = security_shm_alloc(&shp->shm_perm);
726 sprintf(name, "SYSV%08x", key);
727 if (shmflg & SHM_HUGETLB) {
731 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
736 hugesize = ALIGN(size, huge_page_size(hs));
738 /* hugetlb_file_setup applies strict accounting */
739 if (shmflg & SHM_NORESERVE)
740 acctflag = VM_NORESERVE;
741 file = hugetlb_file_setup(name, hugesize, acctflag,
742 HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
* Do not allow no accounting for OVERCOMMIT_NEVER, even
* though allowed on other file systems.
748 if ((shmflg & SHM_NORESERVE) &&
749 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
750 acctflag = VM_NORESERVE;
751 file = shmem_kernel_file_setup(name, size, acctflag);
753 error = PTR_ERR(file);
757 shp->shm_cprid = get_pid(task_tgid(current));
758 shp->shm_lprid = NULL;
759 shp->shm_atim = shp->shm_dtim = 0;
760 shp->shm_ctim = ktime_get_real_seconds();
761 shp->shm_segsz = size;
763 shp->shm_file = file;
764 shp->shm_creator = current;
766 /* ipc_addid() locks shp upon success. */
767 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
774 list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist);
775 task_unlock(current);
778 * shmid gets reported as "inode#" in /proc/pid/maps.
779 * proc-ps tools use this. Changing this will break them.
781 file_inode(file)->i_ino = shp->shm_perm.id;
783 ns->shm_tot += numpages;
784 error = shp->shm_perm.id;
786 ipc_unlock_object(&shp->shm_perm);
791 ipc_update_pid(&shp->shm_cprid, NULL);
792 ipc_update_pid(&shp->shm_lprid, NULL);
794 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
797 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
802 * Called with shm_ids.rwsem and ipcp locked.
804 static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
806 struct shmid_kernel *shp;
808 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
809 if (shp->shm_segsz < params->u.size)
815 long ksys_shmget(key_t key, size_t size, int shmflg)
817 struct ipc_namespace *ns;
818 static const struct ipc_ops shm_ops = {
820 .associate = security_shm_associate,
821 .more_checks = shm_more_checks,
823 struct ipc_params shm_params;
825 ns = current->nsproxy->ipc_ns;
827 shm_params.key = key;
828 shm_params.flg = shmflg;
829 shm_params.u.size = size;
831 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
834 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
836 return ksys_shmget(key, size, shmflg);
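/*
 * For illustration only: a minimal userspace call into the syscall defined
 * above. The key, size and path are hypothetical; any existing path works
 * for ftok().
 *
 *	key_t key = ftok("/tmp/somefile", 'S');
 *	int id = shmget(key, 1 << 20, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("shmget");
 *	(for a hugetlb-backed segment, add SHM_HUGETLB to the flags)
 */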
839 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
843 return copy_to_user(buf, in, sizeof(*in));
848 memset(&out, 0, sizeof(out));
849 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
850 out.shm_segsz = in->shm_segsz;
851 out.shm_atime = in->shm_atime;
852 out.shm_dtime = in->shm_dtime;
853 out.shm_ctime = in->shm_ctime;
854 out.shm_cpid = in->shm_cpid;
855 out.shm_lpid = in->shm_lpid;
856 out.shm_nattch = in->shm_nattch;
858 return copy_to_user(buf, &out, sizeof(out));
865 static inline unsigned long
866 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
870 if (copy_from_user(out, buf, sizeof(*out)))
875 struct shmid_ds tbuf_old;
877 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
880 out->shm_perm.uid = tbuf_old.shm_perm.uid;
881 out->shm_perm.gid = tbuf_old.shm_perm.gid;
882 out->shm_perm.mode = tbuf_old.shm_perm.mode;
891 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
895 return copy_to_user(buf, in, sizeof(*in));
900 if (in->shmmax > INT_MAX)
901 out.shmmax = INT_MAX;
903 out.shmmax = (int)in->shmmax;
905 out.shmmin = in->shmmin;
906 out.shmmni = in->shmmni;
907 out.shmseg = in->shmseg;
908 out.shmall = in->shmall;
910 return copy_to_user(buf, &out, sizeof(out));
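/*
 * Worked example for the clamping above: on a 64-bit kernel whose shmmax
 * is far above 2 GiB, a caller using the old shmid_ds ABI sees shmmax
 * reported as INT_MAX (2147483647) rather than the real limit.
 */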
918 * Calculate and add used RSS and swap pages of a shm.
919 * Called with shm_ids.rwsem held as a reader
921 static void shm_add_rss_swap(struct shmid_kernel *shp,
922 unsigned long *rss_add, unsigned long *swp_add)
926 inode = file_inode(shp->shm_file);
928 if (is_file_hugepages(shp->shm_file)) {
929 struct address_space *mapping = inode->i_mapping;
930 struct hstate *h = hstate_file(shp->shm_file);
931 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
934 struct shmem_inode_info *info = SHMEM_I(inode);
936 spin_lock_irq(&info->lock);
937 *rss_add += inode->i_mapping->nrpages;
938 *swp_add += info->swapped;
939 spin_unlock_irq(&info->lock);
941 *rss_add += inode->i_mapping->nrpages;
947 * Called with shm_ids.rwsem held as a reader
949 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
958 in_use = shm_ids(ns).in_use;
960 for (total = 0, next_id = 0; total < in_use; next_id++) {
961 struct kern_ipc_perm *ipc;
962 struct shmid_kernel *shp;
964 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
967 shp = container_of(ipc, struct shmid_kernel, shm_perm);
969 shm_add_rss_swap(shp, rss, swp);
* This function handles some shmctl commands which require the rwsem
* to be held in write mode.
* NOTE: the caller must not hold any locks; the rwsem is taken inside
* this function.
980 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
981 struct shmid64_ds *shmid64)
983 struct kern_ipc_perm *ipcp;
984 struct shmid_kernel *shp;
987 down_write(&shm_ids(ns).rwsem);
990 ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
991 &shmid64->shm_perm, 0);
997 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
999 err = security_shm_shmctl(&shp->shm_perm, cmd);
1005 ipc_lock_object(&shp->shm_perm);
1006 /* do_shm_rmid unlocks the ipc object and rcu */
1007 do_shm_rmid(ns, ipcp);
1010 ipc_lock_object(&shp->shm_perm);
1011 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1014 shp->shm_ctim = ktime_get_real_seconds();
1022 ipc_unlock_object(&shp->shm_perm);
1026 up_write(&shm_ids(ns).rwsem);
1030 static int shmctl_ipc_info(struct ipc_namespace *ns,
1031 struct shminfo64 *shminfo)
1033 int err = security_shm_shmctl(NULL, IPC_INFO);
1035 memset(shminfo, 0, sizeof(*shminfo));
1036 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1037 shminfo->shmmax = ns->shm_ctlmax;
1038 shminfo->shmall = ns->shm_ctlall;
1039 shminfo->shmmin = SHMMIN;
1040 down_read(&shm_ids(ns).rwsem);
1041 err = ipc_get_maxidx(&shm_ids(ns));
1042 up_read(&shm_ids(ns).rwsem);
1049 static int shmctl_shm_info(struct ipc_namespace *ns,
1050 struct shm_info *shm_info)
1052 int err = security_shm_shmctl(NULL, SHM_INFO);
1054 memset(shm_info, 0, sizeof(*shm_info));
1055 down_read(&shm_ids(ns).rwsem);
1056 shm_info->used_ids = shm_ids(ns).in_use;
1057 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1058 shm_info->shm_tot = ns->shm_tot;
1059 shm_info->swap_attempts = 0;
1060 shm_info->swap_successes = 0;
1061 err = ipc_get_maxidx(&shm_ids(ns));
1062 up_read(&shm_ids(ns).rwsem);
1069 static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1070 int cmd, struct shmid64_ds *tbuf)
1072 struct shmid_kernel *shp;
1075 memset(tbuf, 0, sizeof(*tbuf));
1078 if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1079 shp = shm_obtain_object(ns, shmid);
1084 } else { /* IPC_STAT */
1085 shp = shm_obtain_object_check(ns, shmid);
* Semantically, SHM_STAT_ANY ought to be identical to
* the functionality provided by the /proc/sysvipc/
* interface. As such, only audit these calls and
* do not do traditional S_IRUGO permission checks on
* the ipc object.
1099 if (cmd == SHM_STAT_ANY)
1100 audit_ipc_obj(&shp->shm_perm);
1103 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1107 err = security_shm_shmctl(&shp->shm_perm, cmd);
1111 ipc_lock_object(&shp->shm_perm);
1113 if (!ipc_valid_object(&shp->shm_perm)) {
1114 ipc_unlock_object(&shp->shm_perm);
1119 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1120 tbuf->shm_segsz = shp->shm_segsz;
1121 tbuf->shm_atime = shp->shm_atim;
1122 tbuf->shm_dtime = shp->shm_dtim;
1123 tbuf->shm_ctime = shp->shm_ctim;
1124 #ifndef CONFIG_64BIT
1125 tbuf->shm_atime_high = shp->shm_atim >> 32;
1126 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1127 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1129 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1130 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1131 tbuf->shm_nattch = shp->shm_nattch;
1133 if (cmd == IPC_STAT) {
1135 * As defined in SUS:
1136 * Return 0 on success
1141 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1142 * Return the full id, including the sequence number
1144 err = shp->shm_perm.id;
1147 ipc_unlock_object(&shp->shm_perm);
1153 static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1155 struct shmid_kernel *shp;
1156 struct file *shm_file;
1160 shp = shm_obtain_object_check(ns, shmid);
1166 audit_ipc_obj(&(shp->shm_perm));
1167 err = security_shm_shmctl(&shp->shm_perm, cmd);
1171 ipc_lock_object(&shp->shm_perm);
1173 /* check if shm_destroy() is tearing down shp */
1174 if (!ipc_valid_object(&shp->shm_perm)) {
1179 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1180 kuid_t euid = current_euid();
1182 if (!uid_eq(euid, shp->shm_perm.uid) &&
1183 !uid_eq(euid, shp->shm_perm.cuid)) {
1187 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1193 shm_file = shp->shm_file;
1194 if (is_file_hugepages(shm_file))
1197 if (cmd == SHM_LOCK) {
1198 struct ucounts *ucounts = current_ucounts();
1200 err = shmem_lock(shm_file, 1, ucounts);
1201 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1202 shp->shm_perm.mode |= SHM_LOCKED;
1203 shp->mlock_ucounts = ucounts;
1209 if (!(shp->shm_perm.mode & SHM_LOCKED))
1211 shmem_lock(shm_file, 0, shp->mlock_ucounts);
1212 shp->shm_perm.mode &= ~SHM_LOCKED;
1213 shp->mlock_ucounts = NULL;
1215 ipc_unlock_object(&shp->shm_perm);
1217 shmem_unlock_mapping(shm_file->f_mapping);
1223 ipc_unlock_object(&shp->shm_perm);
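/*
 * For illustration only: the userspace side of the handler above. Locking
 * requires CAP_IPC_LOCK, or segment ownership plus a non-zero
 * RLIMIT_MEMLOCK, as checked above. "id" is a hypothetical segment id.
 *
 *	if (shmctl(id, SHM_LOCK, NULL) != 0)
 *		perror("shmctl(SHM_LOCK)");
 *	...
 *	shmctl(id, SHM_UNLOCK, NULL);
 */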
1229 static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1232 struct ipc_namespace *ns;
1233 struct shmid64_ds sem64;
1235 if (cmd < 0 || shmid < 0)
1238 ns = current->nsproxy->ipc_ns;
1242 struct shminfo64 shminfo;
1243 err = shmctl_ipc_info(ns, &shminfo);
1246 if (copy_shminfo_to_user(buf, &shminfo, version))
1251 struct shm_info shm_info;
1252 err = shmctl_shm_info(ns, &shm_info);
1255 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1262 err = shmctl_stat(ns, shmid, cmd, &sem64);
1265 if (copy_shmid_to_user(buf, &sem64, version))
1270 if (copy_shmid_from_user(&sem64, buf, version))
1274 return shmctl_down(ns, shmid, cmd, &sem64);
1277 return shmctl_do_lock(ns, shmid, cmd);
1283 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1285 return ksys_shmctl(shmid, cmd, buf, IPC_64);
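/*
 * For illustration only: a userspace IPC_STAT sketch against the syscall
 * above ("id" is assumed to be a valid segment id):
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%zu nattch=%lu\n",
 *		       (size_t)ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 */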
1288 #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1289 long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1291 int version = ipc_parse_version(&cmd);
1293 return ksys_shmctl(shmid, cmd, buf, version);
1296 SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1298 return ksys_old_shmctl(shmid, cmd, buf);
1302 #ifdef CONFIG_COMPAT
1304 struct compat_shmid_ds {
1305 struct compat_ipc_perm shm_perm;
1307 old_time32_t shm_atime;
1308 old_time32_t shm_dtime;
1309 old_time32_t shm_ctime;
1310 compat_ipc_pid_t shm_cpid;
1311 compat_ipc_pid_t shm_lpid;
1312 unsigned short shm_nattch;
1313 unsigned short shm_unused;
1314 compat_uptr_t shm_unused2;
1315 compat_uptr_t shm_unused3;
1318 struct compat_shminfo64 {
1319 compat_ulong_t shmmax;
1320 compat_ulong_t shmmin;
1321 compat_ulong_t shmmni;
1322 compat_ulong_t shmseg;
1323 compat_ulong_t shmall;
1324 compat_ulong_t __unused1;
1325 compat_ulong_t __unused2;
1326 compat_ulong_t __unused3;
1327 compat_ulong_t __unused4;
1330 struct compat_shm_info {
1331 compat_int_t used_ids;
1332 compat_ulong_t shm_tot, shm_rss, shm_swp;
1333 compat_ulong_t swap_attempts, swap_successes;
1336 static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1339 if (in->shmmax > INT_MAX)
1340 in->shmmax = INT_MAX;
1341 if (version == IPC_64) {
1342 struct compat_shminfo64 info;
1343 memset(&info, 0, sizeof(info));
1344 info.shmmax = in->shmmax;
1345 info.shmmin = in->shmmin;
1346 info.shmmni = in->shmmni;
1347 info.shmseg = in->shmseg;
1348 info.shmall = in->shmall;
1349 return copy_to_user(buf, &info, sizeof(info));
1351 struct shminfo info;
1352 memset(&info, 0, sizeof(info));
1353 info.shmmax = in->shmmax;
1354 info.shmmin = in->shmmin;
1355 info.shmmni = in->shmmni;
1356 info.shmseg = in->shmseg;
1357 info.shmall = in->shmall;
1358 return copy_to_user(buf, &info, sizeof(info));
1362 static int put_compat_shm_info(struct shm_info *ip,
1363 struct compat_shm_info __user *uip)
1365 struct compat_shm_info info;
1367 memset(&info, 0, sizeof(info));
1368 info.used_ids = ip->used_ids;
1369 info.shm_tot = ip->shm_tot;
1370 info.shm_rss = ip->shm_rss;
1371 info.shm_swp = ip->shm_swp;
1372 info.swap_attempts = ip->swap_attempts;
1373 info.swap_successes = ip->swap_successes;
1374 return copy_to_user(uip, &info, sizeof(info));
1377 static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1380 if (version == IPC_64) {
1381 struct compat_shmid64_ds v;
1382 memset(&v, 0, sizeof(v));
1383 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1384 v.shm_atime = lower_32_bits(in->shm_atime);
1385 v.shm_atime_high = upper_32_bits(in->shm_atime);
1386 v.shm_dtime = lower_32_bits(in->shm_dtime);
1387 v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1388 v.shm_ctime = lower_32_bits(in->shm_ctime);
1389 v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1390 v.shm_segsz = in->shm_segsz;
1391 v.shm_nattch = in->shm_nattch;
1392 v.shm_cpid = in->shm_cpid;
1393 v.shm_lpid = in->shm_lpid;
1394 return copy_to_user(buf, &v, sizeof(v));
1396 struct compat_shmid_ds v;
1397 memset(&v, 0, sizeof(v));
1398 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1399 v.shm_perm.key = in->shm_perm.key;
1400 v.shm_atime = in->shm_atime;
1401 v.shm_dtime = in->shm_dtime;
1402 v.shm_ctime = in->shm_ctime;
1403 v.shm_segsz = in->shm_segsz;
1404 v.shm_nattch = in->shm_nattch;
1405 v.shm_cpid = in->shm_cpid;
1406 v.shm_lpid = in->shm_lpid;
1407 return copy_to_user(buf, &v, sizeof(v));
1411 static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1414 memset(out, 0, sizeof(*out));
1415 if (version == IPC_64) {
1416 struct compat_shmid64_ds __user *p = buf;
1417 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1419 struct compat_shmid_ds __user *p = buf;
1420 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1424 static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1426 struct ipc_namespace *ns;
1427 struct shmid64_ds sem64;
1430 ns = current->nsproxy->ipc_ns;
1432 if (cmd < 0 || shmid < 0)
1437 struct shminfo64 shminfo;
1438 err = shmctl_ipc_info(ns, &shminfo);
1441 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1446 struct shm_info shm_info;
1447 err = shmctl_shm_info(ns, &shm_info);
1450 if (put_compat_shm_info(&shm_info, uptr))
1457 err = shmctl_stat(ns, shmid, cmd, &sem64);
1460 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1465 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1469 return shmctl_down(ns, shmid, cmd, &sem64);
1472 return shmctl_do_lock(ns, shmid, cmd);
1479 COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1481 return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1484 #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1485 long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1487 int version = compat_ipc_parse_version(&cmd);
1489 return compat_ksys_shmctl(shmid, cmd, uptr, version);
1492 COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1494 return compat_ksys_old_shmctl(shmid, cmd, uptr);
* Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
*
* NOTE! Despite the name, this is NOT a direct system call entrypoint. The
* "raddr" thing points to kernel space, and there has to be a wrapper around
* this.
1506 long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1507 ulong *raddr, unsigned long shmlba)
1509 struct shmid_kernel *shp;
1510 unsigned long addr = (unsigned long)shmaddr;
1512 struct file *file, *base;
1514 unsigned long flags = MAP_SHARED;
1517 struct ipc_namespace *ns;
1518 struct shm_file_data *sfd;
1520 unsigned long populate = 0;
1527 if (addr & (shmlba - 1)) {
1528 if (shmflg & SHM_RND) {
1529 addr &= ~(shmlba - 1); /* round down */
* Ensure that the rounded-down address is not nil
* when remapping; this can happen when addr < shmlba.
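/*
 * Illustrative arithmetic for the SHM_RND path above, assuming an
 * architecture where shmlba is 0x1000: addr 0x1234 rounds down to
 * 0x1000, while addr 0x0800 rounds down to 0, which the check below
 * rejects only when SHM_REMAP was requested.
 */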
1536 if (!addr && (shmflg & SHM_REMAP))
1539 #ifndef __ARCH_FORCE_SHMLBA
1540 if (addr & ~PAGE_MASK)
1546 } else if ((shmflg & SHM_REMAP))
1549 if (shmflg & SHM_RDONLY) {
1554 prot = PROT_READ | PROT_WRITE;
1555 acc_mode = S_IRUGO | S_IWUGO;
if (shmflg & SHM_EXEC) {
prot |= PROT_EXEC;
acc_mode |= S_IXUGO;
1564 * We cannot rely on the fs check since SYSV IPC does have an
1565 * additional creator id...
1567 ns = current->nsproxy->ipc_ns;
1569 shp = shm_obtain_object_check(ns, shmid);
1576 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1579 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1583 ipc_lock_object(&shp->shm_perm);
1585 /* check if shm_destroy() is tearing down shp */
1586 if (!ipc_valid_object(&shp->shm_perm)) {
1587 ipc_unlock_object(&shp->shm_perm);
1593 * We need to take a reference to the real shm file to prevent the
1594 * pointer from becoming stale in cases where the lifetime of the outer
1595 * file extends beyond that of the shm segment. It's not usually
1596 * possible, but it can happen during remap_file_pages() emulation as
1597 * that unmaps the memory, then does ->mmap() via file reference only.
1598 * We'll deny the ->mmap() if the shm segment was since removed, but to
1599 * detect shm ID reuse we need to compare the file pointers.
1601 base = get_file(shp->shm_file);
1603 size = i_size_read(file_inode(base));
1604 ipc_unlock_object(&shp->shm_perm);
1608 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1614 file = alloc_file_clone(base, f_flags,
1615 is_file_hugepages(base) ?
1616 &shm_file_operations_huge :
1617 &shm_file_operations);
1618 err = PTR_ERR(file);
1625 sfd->id = shp->shm_perm.id;
1626 sfd->ns = get_ipc_ns(ns);
1629 file->private_data = sfd;
1631 err = security_mmap_file(file, prot, flags);
1635 if (mmap_write_lock_killable(current->mm)) {
1640 if (addr && !(shmflg & SHM_REMAP)) {
1642 if (addr + size < addr)
1645 if (find_vma_intersection(current->mm, addr, addr + size))
1649 addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
1652 if (IS_ERR_VALUE(addr))
1655 mmap_write_unlock(current->mm);
1657 mm_populate(addr, populate);
1663 down_write(&shm_ids(ns).rwsem);
1664 shp = shm_lock(ns, shmid);
1667 if (shm_may_destroy(shp))
1668 shm_destroy(ns, shp);
1671 up_write(&shm_ids(ns).rwsem);
1680 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1685 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1688 force_successful_syscall_return();
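/*
 * For illustration only: typical userspace use of the shmat() syscall
 * defined above ("id" is a hypothetical, existing segment id):
 *
 *	char *p = shmat(id, NULL, 0);
 *	if (p == (void *)-1)
 *		perror("shmat");
 *	else
 *		p[0] = 42;	(mapped read/write; pass SHM_RDONLY for read-only)
 */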
1692 #ifdef CONFIG_COMPAT
1694 #ifndef COMPAT_SHMLBA
1695 #define COMPAT_SHMLBA SHMLBA
1698 COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1703 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1706 force_successful_syscall_return();
1712 * detach and kill segment if marked destroyed.
1713 * The work is done in shm_close.
1715 long ksys_shmdt(char __user *shmaddr)
1717 struct mm_struct *mm = current->mm;
1718 struct vm_area_struct *vma;
1719 unsigned long addr = (unsigned long)shmaddr;
1720 int retval = -EINVAL;
1724 VMA_ITERATOR(vmi, mm, addr);
1727 if (addr & ~PAGE_MASK)
1730 if (mmap_write_lock_killable(mm))
1734 * This function tries to be smart and unmap shm segments that
1735 * were modified by partial mlock or munmap calls:
1736 * - It first determines the size of the shm segment that should be
1737 * unmapped: It searches for a vma that is backed by shm and that
* started at address shmaddr. It records its size and then unmaps
* it.
1740 * - Then it unmaps all shm vmas that started at shmaddr and that
1741 * are within the initially determined size and that are from the
1742 * same shm segment from which we determined the size.
1743 * Errors from do_munmap are ignored: the function only fails if
1744 * it's called with invalid parameters or if it's called to unmap
1745 * a part of a vma. Both calls in this function are for full vmas,
1746 * the parameters are directly copied from the vma itself and always
1747 * valid - therefore do_munmap cannot fail. (famous last words?)
1750 * If it had been mremap()'d, the starting address would not
1751 * match the usual checks anyway. So assume all vma's are
1752 * above the starting address given.
1756 for_each_vma(vmi, vma) {
* Check if the starting address would match, i.e. it's
* a fragment created by mprotect() and/or munmap(), or
* otherwise it starts at this address with no hassles.
1762 if ((vma->vm_ops == &shm_vm_ops) &&
1763 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
* Record the file of the shm segment being
* unmapped. With mremap(), someone could place a
* page from another segment but with equal offsets
* in the range we are unmapping.
1771 file = vma->vm_file;
1772 size = i_size_read(file_inode(vma->vm_file));
1773 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1774 mas_pause(&vmi.mas);
1776 * We discovered the size of the shm segment, so
1777 * break out of here and fall through to the next
1778 * loop that uses the size information to stop
1779 * searching for matching vma's.
1782 vma = vma_next(&vmi);
1788 * We need look no further than the maximum address a fragment
1789 * could possibly have landed at. Also cast things to loff_t to
1790 * prevent overflows and make comparisons vs. equal-width types.
1792 size = PAGE_ALIGN(size);
1793 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1794 /* finding a matching vma now does not alter retval */
1795 if ((vma->vm_ops == &shm_vm_ops) &&
1796 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1797 (vma->vm_file == file)) {
1798 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1799 mas_pause(&vmi.mas);
1802 vma = vma_next(&vmi);
1805 #else /* CONFIG_MMU */
1806 vma = vma_lookup(mm, addr);
/* under NOMMU conditions, the exact address to be destroyed must be
* given.
1810 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1811 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1817 mmap_write_unlock(mm);
1821 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1823 return ksys_shmdt(shmaddr);
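/*
 * For illustration only: the userspace detach path matching the syscall
 * above ("p" is assumed to be an address returned by a prior shmat()):
 *
 *	if (shmdt(p) != 0)
 *		perror("shmdt");
 *	(if IPC_RMID was already requested, the last detach frees the segment)
 */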
1826 #ifdef CONFIG_PROC_FS
1827 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1829 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1830 struct user_namespace *user_ns = seq_user_ns(s);
1831 struct kern_ipc_perm *ipcp = it;
1832 struct shmid_kernel *shp;
1833 unsigned long rss = 0, swp = 0;
1835 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1836 shm_add_rss_swap(shp, &rss, &swp);
1838 #if BITS_PER_LONG <= 32
1839 #define SIZE_SPEC "%10lu"
1841 #define SIZE_SPEC "%21lu"
1845 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1846 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1847 SIZE_SPEC " " SIZE_SPEC "\n",
1852 pid_nr_ns(shp->shm_cprid, pid_ns),
1853 pid_nr_ns(shp->shm_lprid, pid_ns),
1855 from_kuid_munged(user_ns, shp->shm_perm.uid),
1856 from_kgid_munged(user_ns, shp->shm_perm.gid),
1857 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1858 from_kgid_munged(user_ns, shp->shm_perm.cgid),