// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */
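/*
 * This file implements the System V shared memory interface: shmget(),
 * shmat(), shmdt() and shmctl(), built on top of files backed by shmem
 * (tmpfs) or hugetlbfs. Segment creation happens in newseg(), mapping in
 * do_shmat(), and teardown in shm_destroy().
 */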
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
31 #include <linux/shm.h>
32 #include <linux/init.h>
33 #include <linux/file.h>
34 #include <linux/mman.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/security.h>
37 #include <linux/syscalls.h>
38 #include <linux/audit.h>
39 #include <linux/capability.h>
40 #include <linux/ptrace.h>
41 #include <linux/seq_file.h>
42 #include <linux/rwsem.h>
43 #include <linux/nsproxy.h>
44 #include <linux/mount.h>
45 #include <linux/ipc_namespace.h>
46 #include <linux/rhashtable.h>
#include <linux/uaccess.h>

#include "util.h"
struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file		*shm_file;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time64_t		shm_atim;
	time64_t		shm_dtim;
	time64_t		shm_ctim;
	struct pid		*shm_cprid;
	struct pid		*shm_lprid;
	struct ucounts		*mlock_ucounts;

	/*
	 * The task that created the shm object; only valid while
	 * shm_clist is non-empty, see task_lock(shp->shm_creator).
	 */
	struct task_struct	*shm_creator;

	/*
	 * List by creator. task_lock(->shm_creator) required for read/write.
	 * If list_empty(), then the creator is dead already.
	 */
	struct list_head	shm_clist;
	struct ipc_namespace	*ns;
} __randomize_layout;
79 /* shm_mode upper byte flags */
80 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
81 #define SHM_LOCKED 02000 /* segment will not be swapped */
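/*
 * Illustrative sketch only: SHM_DEST is how a segment that is removed
 * while still attached gets deferred destruction. From userspace the
 * sequence looks roughly like:
 *
 *	shmctl(id, IPC_RMID, NULL);	marks the segment SHM_DEST
 *	...				the last shmdt() then frees it
 *
 * See do_shm_rmid() and shm_close()/__shm_close() below.
 */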
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};
90 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
92 static const struct file_operations shm_file_operations;
93 static const struct vm_operations_struct shm_vm_ops;
95 #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
97 #define shm_unlock(shp) \
98 ipc_unlock(&(shp)->shm_perm)
100 static int newseg(struct ipc_namespace *, struct ipc_params *);
101 static void shm_open(struct vm_area_struct *vma);
102 static void shm_close(struct vm_area_struct *vma);
103 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}
/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	WARN_ON(ns != shp->ns);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}
pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
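/*
 * A rough sketch of the resulting /proc/sysvipc/shm output (values and
 * column widths are illustrative only):
 *
 *	key   shmid  perms size cpid lpid nattch uid  gid ...
 *	0     65536  600   4096 1234 1234 2      1000 1000 ...
 *
 * One row per segment, formatted by sysvipc_shm_proc_show() below.
 */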
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
187 * shm_lock_(check_) routines are called in the paths where the rwsem
188 * is not necessarily held.
190 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
192 struct kern_ipc_perm *ipcp;
195 ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
199 ipc_lock_object(ipcp);
201 * ipc_rmid() may have already freed the ID while ipc_lock_object()
202 * was spinning: here verify that the structure is still valid.
203 * Upon races with RMID, return -EIDRM, thus indicating that
204 * the ID points to a removed identifier.
206 if (ipc_valid_object(ipcp)) {
207 /* return a locked ipc object upon success */
208 return container_of(ipcp, struct shmid_kernel, shm_perm);
211 ipc_unlock_object(ipcp);
212 ipcp = ERR_PTR(-EIDRM);
216 * Callers of shm_lock() must validate the status of the returned ipc
217 * object pointer and error out as appropriate.
219 return ERR_CAST(ipcp);
222 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
225 ipc_lock_object(&ipcp->shm_perm);
static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm, rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel, shm_perm);
	security_shm_free(&shp->shm_perm);
	kfree(shp);
}
239 * It has to be called with shp locked.
240 * It must be called before ipc_rmid()
242 static inline void shm_clist_rm(struct shmid_kernel *shp)
244 struct task_struct *creator;
246 /* ensure that shm_creator does not disappear */
250 * A concurrent exit_shm may do a list_del_init() as well.
251 * Just do nothing if exit_shm already did the work
253 if (!list_empty(&shp->shm_clist)) {
255 * shp->shm_creator is guaranteed to be valid *only*
256 * if shp->shm_clist is not empty.
258 creator = shp->shm_creator;
262 * list_del_init() is a nop if the entry was already removed
265 list_del_init(&shp->shm_clist);
266 task_unlock(creator);
271 static inline void shm_rmid(struct shmid_kernel *s)
274 ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
278 static int __shm_open(struct shm_file_data *sfd)
280 struct shmid_kernel *shp;
282 shp = shm_lock(sfd->ns, sfd->id);
287 if (shp->shm_file != sfd->file) {
293 shp->shm_atim = ktime_get_real_seconds();
294 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
300 /* This is called by fork, once for every shm attach. */
301 static void shm_open(struct vm_area_struct *vma)
303 struct file *file = vma->vm_file;
304 struct shm_file_data *sfd = shm_file_data(file);
307 /* Always call underlying open if present */
308 if (sfd->vm_ops->open)
309 sfd->vm_ops->open(vma);
311 err = __shm_open(sfd);
313 * We raced in the idr lookup or with shm_destroy().
314 * Either way, the ID is busted.
/*
 * shm_destroy - free the struct shmid_kernel
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
328 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
330 struct file *shm_file;
332 shm_file = shp->shm_file;
333 shp->shm_file = NULL;
334 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
337 if (!is_file_hugepages(shm_file))
338 shmem_lock(shm_file, 0, shp->mlock_ucounts);
340 ipc_update_pid(&shp->shm_cprid, NULL);
341 ipc_update_pid(&shp->shm_lprid, NULL);
342 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
346 * shm_may_destroy - identifies whether shm segment should be destroyed now
348 * Returns true if and only if there are no active users of the segment and
349 * one of the following is true:
351 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
353 * 2) sysctl kernel.shm_rmid_forced is set to 1.
355 static bool shm_may_destroy(struct shmid_kernel *shp)
357 return (shp->shm_nattch == 0) &&
358 (shp->ns->shm_rmid_forced ||
359 (shp->shm_perm.mode & SHM_DEST));
363 * remove the attach descriptor vma.
364 * free memory for segment if it is marked destroyed.
365 * The descriptor has already been removed from the current->mm->mmap list
366 * and will later be kfree()d.
368 static void __shm_close(struct shm_file_data *sfd)
370 struct shmid_kernel *shp;
371 struct ipc_namespace *ns = sfd->ns;
373 down_write(&shm_ids(ns).rwsem);
374 /* remove from the list of attaches of the shm segment */
375 shp = shm_lock(ns, sfd->id);
378 * We raced in the idr lookup or with shm_destroy().
379 * Either way, the ID is busted.
381 if (WARN_ON_ONCE(IS_ERR(shp)))
382 goto done; /* no-op */
384 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
385 shp->shm_dtim = ktime_get_real_seconds();
387 if (shm_may_destroy(shp))
388 shm_destroy(ns, shp);
392 up_write(&shm_ids(ns).rwsem);
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	/* Always call underlying close if present */
	if (sfd->vm_ops->close)
		sfd->vm_ops->close(vma);

	__shm_close(sfd);
}
407 /* Called with ns->shm_ids(ns).rwsem locked */
408 static int shm_try_destroy_orphaned(int id, void *p, void *data)
410 struct ipc_namespace *ns = data;
411 struct kern_ipc_perm *ipcp = p;
412 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	/*
	 * We want to destroy segments without users and whose originating
	 * process has already exited.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
420 if (!list_empty(&shp->shm_clist))
423 if (shm_may_destroy(shp)) {
424 shm_lock_by_ptr(shp);
425 shm_destroy(ns, shp);
430 void shm_destroy_orphaned(struct ipc_namespace *ns)
432 down_write(&shm_ids(ns).rwsem);
433 if (shm_ids(ns).in_use)
434 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
435 up_write(&shm_ids(ns).rwsem);
438 /* Locking assumes this will only be called with task == current */
439 void exit_shm(struct task_struct *task)
442 struct shmid_kernel *shp;
443 struct ipc_namespace *ns;
447 if (list_empty(&task->sysvshm.shm_clist)) {
452 shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
		/*
		 * 1) Get a pointer to the ipc namespace. This pointer is
		 *    guaranteed to be valid because the shp lifetime is
		 *    always shorter than that of the namespace shp lives in.
		 *    We hold task_lock(), so shp cannot be freed under us.
		 */
		ns = shp->ns;
		/*
		 * 2) If kernel.shm_rmid_forced is not set then only keep track
		 *    of which shmids are orphaned, so that a later set of the
		 *    sysctl can clean them up.
		 */
		if (!ns->shm_rmid_forced)
			goto unlink_continue;
		/*
		 * 3) Get a reference to the namespace. The refcount could
		 *    already be 0; if so, the shm objects will be freed by
		 *    free_ipc_work().
		 */
		ns = get_ipc_ns_not_zero(ns);
		if (!ns) {
unlink_continue:
			list_del_init(&shp->shm_clist);
			task_unlock(task);
			continue;
		}
486 * 4) get a reference to shp.
487 * This cannot fail: shm_clist_rm() is called before
488 * ipc_rmid(), thus the refcount cannot be 0.
490 WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
493 * 5) unlink the shm segment from the list of segments
494 * created by current.
495 * This must be done last. After unlinking,
496 * only the refcounts obtained above prevent IPC_RMID
497 * from destroying the segment or the namespace.
499 list_del_init(&shp->shm_clist);
504 * 6) we have all references
505 * Thus lock & if needed destroy shp.
507 down_write(&shm_ids(ns).rwsem);
508 shm_lock_by_ptr(shp);
510 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
511 * safe to call ipc_rcu_putref here
513 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
		if (ipc_valid_object(&shp->shm_perm)) {
			if (shm_may_destroy(shp))
				shm_destroy(ns, shp);
			else
				shm_unlock(shp);
		} else {
			/*
			 * Someone else deleted the shp from the namespace
			 * idr/kht while we waited. Just unlock and continue.
			 */
			shm_unlock(shp);
		}
		up_write(&shm_ids(ns).rwsem);
530 put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
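/*
 * The helpers below (shm_fault(), shm_may_split(), shm_pagesize() and the
 * NUMA policy hooks) simply delegate to the vm_operations of the backing
 * shmem or hugetlbfs file, which shm_mmap() saved in shm_file_data.
 */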
534 static vm_fault_t shm_fault(struct vm_fault *vmf)
536 struct file *file = vmf->vma->vm_file;
537 struct shm_file_data *sfd = shm_file_data(file);
539 return sfd->vm_ops->fault(vmf);
542 static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
544 struct file *file = vma->vm_file;
545 struct shm_file_data *sfd = shm_file_data(file);
547 if (sfd->vm_ops->may_split)
548 return sfd->vm_ops->may_split(vma, addr);
553 static unsigned long shm_pagesize(struct vm_area_struct *vma)
555 struct file *file = vma->vm_file;
556 struct shm_file_data *sfd = shm_file_data(file);
558 if (sfd->vm_ops->pagesize)
559 return sfd->vm_ops->pagesize(vma);
565 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
567 struct file *file = vma->vm_file;
568 struct shm_file_data *sfd = shm_file_data(file);
571 if (sfd->vm_ops->set_policy)
572 err = sfd->vm_ops->set_policy(vma, new);
576 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
579 struct file *file = vma->vm_file;
580 struct shm_file_data *sfd = shm_file_data(file);
581 struct mempolicy *pol = NULL;
583 if (sfd->vm_ops->get_policy)
584 pol = sfd->vm_ops->get_policy(vma, addr);
585 else if (vma->vm_policy)
586 pol = vma->vm_policy;
592 static int shm_mmap(struct file *file, struct vm_area_struct *vma)
594 struct shm_file_data *sfd = shm_file_data(file);
598 * In case of remap_file_pages() emulation, the file can represent an
599 * IPC ID that was removed, and possibly even reused by another shm
600 * segment already. Propagate this case as an error to caller.
602 ret = __shm_open(sfd);
606 ret = call_mmap(sfd->file, vma);
611 sfd->vm_ops = vma->vm_ops;
613 WARN_ON(!sfd->vm_ops->fault);
615 vma->vm_ops = &shm_vm_ops;
619 static int shm_release(struct inode *ino, struct file *file)
621 struct shm_file_data *sfd = shm_file_data(file);
625 shm_file_data(file) = NULL;
630 static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
632 struct shm_file_data *sfd = shm_file_data(file);
634 if (!sfd->file->f_op->fsync)
636 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
639 static long shm_fallocate(struct file *file, int mode, loff_t offset,
642 struct shm_file_data *sfd = shm_file_data(file);
644 if (!sfd->file->f_op->fallocate)
646 return sfd->file->f_op->fallocate(file, mode, offset, len);
649 static unsigned long shm_get_unmapped_area(struct file *file,
650 unsigned long addr, unsigned long len, unsigned long pgoff,
653 struct shm_file_data *sfd = shm_file_data(file);
655 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
669 * shm_file_operations_huge is now identical to shm_file_operations,
670 * but we keep it distinct for the sake of is_file_shm_hugepages().
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
681 bool is_file_shm_hugepages(struct file *file)
683 return file->f_op == &shm_file_operations_huge;
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.may_split = shm_may_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
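/*
 * Note: shm_mmap() first calls the backing file's ->mmap() and then
 * installs shm_vm_ops on the vma, so the open/close accounting and the
 * delegating callbacks above are always in place for SysV mappings.
 */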
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 * Called with shm_ids.rwsem held as a writer.
 */
705 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
707 key_t key = params->key;
708 int shmflg = params->flg;
709 size_t size = params->u.size;
711 struct shmid_kernel *shp;
712 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
715 vm_flags_t acctflag = 0;
717 if (size < SHMMIN || size > ns->shm_ctlmax)
720 if (numpages << PAGE_SHIFT < size)
723 if (ns->shm_tot + numpages < ns->shm_tot ||
724 ns->shm_tot + numpages > ns->shm_ctlall)
727 shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
731 shp->shm_perm.key = key;
732 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
733 shp->mlock_ucounts = NULL;
735 shp->shm_perm.security = NULL;
736 error = security_shm_alloc(&shp->shm_perm);
742 sprintf(name, "SYSV%08x", key);
743 if (shmflg & SHM_HUGETLB) {
747 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
752 hugesize = ALIGN(size, huge_page_size(hs));
754 /* hugetlb_file_setup applies strict accounting */
755 if (shmflg & SHM_NORESERVE)
756 acctflag = VM_NORESERVE;
757 file = hugetlb_file_setup(name, hugesize, acctflag,
758 HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		/*
		 * Do not allow unaccounted (SHM_NORESERVE) segments when
		 * overcommit is set to OVERCOMMIT_NEVER, even if asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
767 file = shmem_kernel_file_setup(name, size, acctflag);
769 error = PTR_ERR(file);
773 shp->shm_cprid = get_pid(task_tgid(current));
774 shp->shm_lprid = NULL;
775 shp->shm_atim = shp->shm_dtim = 0;
776 shp->shm_ctim = ktime_get_real_seconds();
777 shp->shm_segsz = size;
779 shp->shm_file = file;
780 shp->shm_creator = current;
782 /* ipc_addid() locks shp upon success. */
783 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
790 list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist);
791 task_unlock(current);
794 * shmid gets reported as "inode#" in /proc/pid/maps.
795 * proc-ps tools use this. Changing this will break them.
797 file_inode(file)->i_ino = shp->shm_perm.id;
799 ns->shm_tot += numpages;
800 error = shp->shm_perm.id;
802 ipc_unlock_object(&shp->shm_perm);
807 ipc_update_pid(&shp->shm_cprid, NULL);
808 ipc_update_pid(&shp->shm_lprid, NULL);
810 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
813 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
818 * Called with shm_ids.rwsem and ipcp locked.
820 static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
822 struct shmid_kernel *shp;
824 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
825 if (shp->shm_segsz < params->u.size)
831 long ksys_shmget(key_t key, size_t size, int shmflg)
833 struct ipc_namespace *ns;
834 static const struct ipc_ops shm_ops = {
836 .associate = security_shm_associate,
837 .more_checks = shm_more_checks,
839 struct ipc_params shm_params;
841 ns = current->nsproxy->ipc_ns;
843 shm_params.key = key;
844 shm_params.flg = shmflg;
845 shm_params.u.size = size;
847 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
850 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
852 return ksys_shmget(key, size, shmflg);
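/*
 * Minimal userspace sketch of the call sequence served by this file
 * (illustrative only, error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	memcpy(p, "hello", 6);
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */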
855 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
859 return copy_to_user(buf, in, sizeof(*in));
864 memset(&out, 0, sizeof(out));
865 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
866 out.shm_segsz = in->shm_segsz;
867 out.shm_atime = in->shm_atime;
868 out.shm_dtime = in->shm_dtime;
869 out.shm_ctime = in->shm_ctime;
870 out.shm_cpid = in->shm_cpid;
871 out.shm_lpid = in->shm_lpid;
872 out.shm_nattch = in->shm_nattch;
874 return copy_to_user(buf, &out, sizeof(out));
881 static inline unsigned long
882 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
886 if (copy_from_user(out, buf, sizeof(*out)))
891 struct shmid_ds tbuf_old;
893 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
896 out->shm_perm.uid = tbuf_old.shm_perm.uid;
897 out->shm_perm.gid = tbuf_old.shm_perm.gid;
898 out->shm_perm.mode = tbuf_old.shm_perm.mode;
907 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
911 return copy_to_user(buf, in, sizeof(*in));
916 if (in->shmmax > INT_MAX)
917 out.shmmax = INT_MAX;
919 out.shmmax = (int)in->shmmax;
921 out.shmmin = in->shmmin;
922 out.shmmni = in->shmmni;
923 out.shmseg = in->shmseg;
924 out.shmall = in->shmall;
926 return copy_to_user(buf, &out, sizeof(out));
934 * Calculate and add used RSS and swap pages of a shm.
935 * Called with shm_ids.rwsem held as a reader
937 static void shm_add_rss_swap(struct shmid_kernel *shp,
938 unsigned long *rss_add, unsigned long *swp_add)
942 inode = file_inode(shp->shm_file);
944 if (is_file_hugepages(shp->shm_file)) {
945 struct address_space *mapping = inode->i_mapping;
946 struct hstate *h = hstate_file(shp->shm_file);
947 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
950 struct shmem_inode_info *info = SHMEM_I(inode);
952 spin_lock_irq(&info->lock);
953 *rss_add += inode->i_mapping->nrpages;
954 *swp_add += info->swapped;
955 spin_unlock_irq(&info->lock);
957 *rss_add += inode->i_mapping->nrpages;
963 * Called with shm_ids.rwsem held as a reader
965 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
974 in_use = shm_ids(ns).in_use;
976 for (total = 0, next_id = 0; total < in_use; next_id++) {
977 struct kern_ipc_perm *ipc;
978 struct shmid_kernel *shp;
980 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
983 shp = container_of(ipc, struct shmid_kernel, shm_perm);
985 shm_add_rss_swap(shp, rss, swp);
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: it must be called with no locks held; the rwsem is taken inside
 * this function.
 */
996 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
997 struct shmid64_ds *shmid64)
999 struct kern_ipc_perm *ipcp;
1000 struct shmid_kernel *shp;
1003 down_write(&shm_ids(ns).rwsem);
1006 ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
1007 &shmid64->shm_perm, 0);
1009 err = PTR_ERR(ipcp);
1013 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1015 err = security_shm_shmctl(&shp->shm_perm, cmd);
1021 ipc_lock_object(&shp->shm_perm);
1022 /* do_shm_rmid unlocks the ipc object and rcu */
1023 do_shm_rmid(ns, ipcp);
1026 ipc_lock_object(&shp->shm_perm);
1027 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
1030 shp->shm_ctim = ktime_get_real_seconds();
1038 ipc_unlock_object(&shp->shm_perm);
1042 up_write(&shm_ids(ns).rwsem);
1046 static int shmctl_ipc_info(struct ipc_namespace *ns,
1047 struct shminfo64 *shminfo)
1049 int err = security_shm_shmctl(NULL, IPC_INFO);
1051 memset(shminfo, 0, sizeof(*shminfo));
1052 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
1053 shminfo->shmmax = ns->shm_ctlmax;
1054 shminfo->shmall = ns->shm_ctlall;
1055 shminfo->shmmin = SHMMIN;
1056 down_read(&shm_ids(ns).rwsem);
1057 err = ipc_get_maxidx(&shm_ids(ns));
1058 up_read(&shm_ids(ns).rwsem);
1065 static int shmctl_shm_info(struct ipc_namespace *ns,
1066 struct shm_info *shm_info)
1068 int err = security_shm_shmctl(NULL, SHM_INFO);
1070 memset(shm_info, 0, sizeof(*shm_info));
1071 down_read(&shm_ids(ns).rwsem);
1072 shm_info->used_ids = shm_ids(ns).in_use;
1073 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
1074 shm_info->shm_tot = ns->shm_tot;
1075 shm_info->swap_attempts = 0;
1076 shm_info->swap_successes = 0;
1077 err = ipc_get_maxidx(&shm_ids(ns));
1078 up_read(&shm_ids(ns).rwsem);
1085 static int shmctl_stat(struct ipc_namespace *ns, int shmid,
1086 int cmd, struct shmid64_ds *tbuf)
1088 struct shmid_kernel *shp;
1091 memset(tbuf, 0, sizeof(*tbuf));
1094 if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
1095 shp = shm_obtain_object(ns, shmid);
1100 } else { /* IPC_STAT */
1101 shp = shm_obtain_object_check(ns, shmid);
	/*
	 * Semantically SHM_STAT_ANY ought to be identical to
	 * the functionality provided by the /proc/sysvipc/shm
	 * interface. As such, only audit these calls and do not
	 * do traditional S_IRUGO permission checks on the ipc
	 * object.
	 */
1115 if (cmd == SHM_STAT_ANY)
1116 audit_ipc_obj(&shp->shm_perm);
1119 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1123 err = security_shm_shmctl(&shp->shm_perm, cmd);
1127 ipc_lock_object(&shp->shm_perm);
1129 if (!ipc_valid_object(&shp->shm_perm)) {
1130 ipc_unlock_object(&shp->shm_perm);
1135 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1136 tbuf->shm_segsz = shp->shm_segsz;
1137 tbuf->shm_atime = shp->shm_atim;
1138 tbuf->shm_dtime = shp->shm_dtim;
1139 tbuf->shm_ctime = shp->shm_ctim;
1140 #ifndef CONFIG_64BIT
1141 tbuf->shm_atime_high = shp->shm_atim >> 32;
1142 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1143 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1145 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1146 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1147 tbuf->shm_nattch = shp->shm_nattch;
	if (cmd == IPC_STAT) {
		/* As defined in SUS: return 0 on success */
		err = 0;
	} else {
		/*
		 * SHM_STAT and SHM_STAT_ANY (both Linux specific):
		 * return the full id, including the sequence number.
		 */
		err = shp->shm_perm.id;
	}
1163 ipc_unlock_object(&shp->shm_perm);
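/*
 * Usage note (illustrative sketch): SHM_STAT/SHM_STAT_ANY take an index
 * rather than a shmid, which is how tools like ipcs walk all segments:
 *
 *	int max = shmctl(0, SHM_INFO, (struct shmid_ds *)&info);
 *	for (int i = 0; i <= max; i++)
 *		if (shmctl(i, SHM_STAT, &ds) >= 0)
 *			... the return value is the real shmid ...
 */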
1169 static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1171 struct shmid_kernel *shp;
1172 struct file *shm_file;
1176 shp = shm_obtain_object_check(ns, shmid);
1182 audit_ipc_obj(&(shp->shm_perm));
1183 err = security_shm_shmctl(&shp->shm_perm, cmd);
1187 ipc_lock_object(&shp->shm_perm);
1189 /* check if shm_destroy() is tearing down shp */
1190 if (!ipc_valid_object(&shp->shm_perm)) {
1195 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1196 kuid_t euid = current_euid();
1198 if (!uid_eq(euid, shp->shm_perm.uid) &&
1199 !uid_eq(euid, shp->shm_perm.cuid)) {
1203 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1209 shm_file = shp->shm_file;
1210 if (is_file_hugepages(shm_file))
1213 if (cmd == SHM_LOCK) {
1214 struct ucounts *ucounts = current_ucounts();
1216 err = shmem_lock(shm_file, 1, ucounts);
1217 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1218 shp->shm_perm.mode |= SHM_LOCKED;
1219 shp->mlock_ucounts = ucounts;
1225 if (!(shp->shm_perm.mode & SHM_LOCKED))
1227 shmem_lock(shm_file, 0, shp->mlock_ucounts);
1228 shp->shm_perm.mode &= ~SHM_LOCKED;
1229 shp->mlock_ucounts = NULL;
1231 ipc_unlock_object(&shp->shm_perm);
1233 shmem_unlock_mapping(shm_file->f_mapping);
1239 ipc_unlock_object(&shp->shm_perm);
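/*
 * Usage note for the path above (illustrative): SHM_LOCK pins a segment
 * in memory and SHM_UNLOCK releases it, e.g.
 *
 *	shmctl(id, SHM_LOCK, NULL);
 *	...
 *	shmctl(id, SHM_UNLOCK, NULL);
 *
 * Without CAP_IPC_LOCK the caller must own (or have created) the segment
 * and have a non-zero RLIMIT_MEMLOCK; the locked pages are charged to the
 * caller's ucounts.
 */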
1245 static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
1248 struct ipc_namespace *ns;
1249 struct shmid64_ds sem64;
1251 if (cmd < 0 || shmid < 0)
1254 ns = current->nsproxy->ipc_ns;
1258 struct shminfo64 shminfo;
1259 err = shmctl_ipc_info(ns, &shminfo);
1262 if (copy_shminfo_to_user(buf, &shminfo, version))
1267 struct shm_info shm_info;
1268 err = shmctl_shm_info(ns, &shm_info);
1271 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1278 err = shmctl_stat(ns, shmid, cmd, &sem64);
1281 if (copy_shmid_to_user(buf, &sem64, version))
1286 if (copy_shmid_from_user(&sem64, buf, version))
1290 return shmctl_down(ns, shmid, cmd, &sem64);
1293 return shmctl_do_lock(ns, shmid, cmd);
1299 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1301 return ksys_shmctl(shmid, cmd, buf, IPC_64);
1304 #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
1305 long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1307 int version = ipc_parse_version(&cmd);
1309 return ksys_shmctl(shmid, cmd, buf, version);
1312 SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1314 return ksys_old_shmctl(shmid, cmd, buf);
1318 #ifdef CONFIG_COMPAT
1320 struct compat_shmid_ds {
1321 struct compat_ipc_perm shm_perm;
1323 old_time32_t shm_atime;
1324 old_time32_t shm_dtime;
1325 old_time32_t shm_ctime;
1326 compat_ipc_pid_t shm_cpid;
1327 compat_ipc_pid_t shm_lpid;
1328 unsigned short shm_nattch;
1329 unsigned short shm_unused;
1330 compat_uptr_t shm_unused2;
1331 compat_uptr_t shm_unused3;
1334 struct compat_shminfo64 {
1335 compat_ulong_t shmmax;
1336 compat_ulong_t shmmin;
1337 compat_ulong_t shmmni;
1338 compat_ulong_t shmseg;
1339 compat_ulong_t shmall;
1340 compat_ulong_t __unused1;
1341 compat_ulong_t __unused2;
1342 compat_ulong_t __unused3;
1343 compat_ulong_t __unused4;
1346 struct compat_shm_info {
1347 compat_int_t used_ids;
1348 compat_ulong_t shm_tot, shm_rss, shm_swp;
1349 compat_ulong_t swap_attempts, swap_successes;
1352 static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1355 if (in->shmmax > INT_MAX)
1356 in->shmmax = INT_MAX;
1357 if (version == IPC_64) {
1358 struct compat_shminfo64 info;
1359 memset(&info, 0, sizeof(info));
1360 info.shmmax = in->shmmax;
1361 info.shmmin = in->shmmin;
1362 info.shmmni = in->shmmni;
1363 info.shmseg = in->shmseg;
1364 info.shmall = in->shmall;
1365 return copy_to_user(buf, &info, sizeof(info));
1367 struct shminfo info;
1368 memset(&info, 0, sizeof(info));
1369 info.shmmax = in->shmmax;
1370 info.shmmin = in->shmmin;
1371 info.shmmni = in->shmmni;
1372 info.shmseg = in->shmseg;
1373 info.shmall = in->shmall;
1374 return copy_to_user(buf, &info, sizeof(info));
1378 static int put_compat_shm_info(struct shm_info *ip,
1379 struct compat_shm_info __user *uip)
1381 struct compat_shm_info info;
1383 memset(&info, 0, sizeof(info));
1384 info.used_ids = ip->used_ids;
1385 info.shm_tot = ip->shm_tot;
1386 info.shm_rss = ip->shm_rss;
1387 info.shm_swp = ip->shm_swp;
1388 info.swap_attempts = ip->swap_attempts;
1389 info.swap_successes = ip->swap_successes;
1390 return copy_to_user(uip, &info, sizeof(info));
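/*
 * The compat copy helpers below mirror the native copy_shmid_to_user()/
 * copy_shmid_from_user() pair: for IPC_64 the 64-bit timestamps are split
 * into low/high 32-bit halves, while the old layout simply truncates them.
 */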
1393 static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1396 if (version == IPC_64) {
1397 struct compat_shmid64_ds v;
1398 memset(&v, 0, sizeof(v));
1399 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1400 v.shm_atime = lower_32_bits(in->shm_atime);
1401 v.shm_atime_high = upper_32_bits(in->shm_atime);
1402 v.shm_dtime = lower_32_bits(in->shm_dtime);
1403 v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1404 v.shm_ctime = lower_32_bits(in->shm_ctime);
1405 v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1406 v.shm_segsz = in->shm_segsz;
1407 v.shm_nattch = in->shm_nattch;
1408 v.shm_cpid = in->shm_cpid;
1409 v.shm_lpid = in->shm_lpid;
1410 return copy_to_user(buf, &v, sizeof(v));
1412 struct compat_shmid_ds v;
1413 memset(&v, 0, sizeof(v));
1414 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1415 v.shm_perm.key = in->shm_perm.key;
1416 v.shm_atime = in->shm_atime;
1417 v.shm_dtime = in->shm_dtime;
1418 v.shm_ctime = in->shm_ctime;
1419 v.shm_segsz = in->shm_segsz;
1420 v.shm_nattch = in->shm_nattch;
1421 v.shm_cpid = in->shm_cpid;
1422 v.shm_lpid = in->shm_lpid;
1423 return copy_to_user(buf, &v, sizeof(v));
1427 static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1430 memset(out, 0, sizeof(*out));
1431 if (version == IPC_64) {
1432 struct compat_shmid64_ds __user *p = buf;
1433 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1435 struct compat_shmid_ds __user *p = buf;
1436 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1440 static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
1442 struct ipc_namespace *ns;
1443 struct shmid64_ds sem64;
1446 ns = current->nsproxy->ipc_ns;
1448 if (cmd < 0 || shmid < 0)
1453 struct shminfo64 shminfo;
1454 err = shmctl_ipc_info(ns, &shminfo);
1457 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1462 struct shm_info shm_info;
1463 err = shmctl_shm_info(ns, &shm_info);
1466 if (put_compat_shm_info(&shm_info, uptr))
1473 err = shmctl_stat(ns, shmid, cmd, &sem64);
1476 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1481 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1485 return shmctl_down(ns, shmid, cmd, &sem64);
1488 return shmctl_do_lock(ns, shmid, cmd);
1495 COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1497 return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
1500 #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
1501 long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
1503 int version = compat_ipc_parse_version(&cmd);
1505 return compat_ksys_shmctl(shmid, cmd, uptr, version);
1508 COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
1510 return compat_ksys_old_shmctl(shmid, cmd, uptr);
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this function.
 */
1522 long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1523 ulong *raddr, unsigned long shmlba)
1525 struct shmid_kernel *shp;
1526 unsigned long addr = (unsigned long)shmaddr;
1528 struct file *file, *base;
1530 unsigned long flags = MAP_SHARED;
1533 struct ipc_namespace *ns;
1534 struct shm_file_data *sfd;
1536 unsigned long populate = 0;
1543 if (addr & (shmlba - 1)) {
1544 if (shmflg & SHM_RND) {
1545 addr &= ~(shmlba - 1); /* round down */
1548 * Ensure that the round-down is non-nil
1549 * when remapping. This can happen for
1550 * cases when addr < shmlba.
1552 if (!addr && (shmflg & SHM_REMAP))
1555 #ifndef __ARCH_FORCE_SHMLBA
1556 if (addr & ~PAGE_MASK)
1562 } else if ((shmflg & SHM_REMAP))
1565 if (shmflg & SHM_RDONLY) {
1570 prot = PROT_READ | PROT_WRITE;
1571 acc_mode = S_IRUGO | S_IWUGO;
1574 if (shmflg & SHM_EXEC) {
1576 acc_mode |= S_IXUGO;
1580 * We cannot rely on the fs check since SYSV IPC does have an
1581 * additional creator id...
1583 ns = current->nsproxy->ipc_ns;
1585 shp = shm_obtain_object_check(ns, shmid);
1592 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1595 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1599 ipc_lock_object(&shp->shm_perm);
1601 /* check if shm_destroy() is tearing down shp */
1602 if (!ipc_valid_object(&shp->shm_perm)) {
1603 ipc_unlock_object(&shp->shm_perm);
1609 * We need to take a reference to the real shm file to prevent the
1610 * pointer from becoming stale in cases where the lifetime of the outer
1611 * file extends beyond that of the shm segment. It's not usually
1612 * possible, but it can happen during remap_file_pages() emulation as
1613 * that unmaps the memory, then does ->mmap() via file reference only.
1614 * We'll deny the ->mmap() if the shm segment was since removed, but to
1615 * detect shm ID reuse we need to compare the file pointers.
1617 base = get_file(shp->shm_file);
1619 size = i_size_read(file_inode(base));
1620 ipc_unlock_object(&shp->shm_perm);
1624 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1630 file = alloc_file_clone(base, f_flags,
1631 is_file_hugepages(base) ?
1632 &shm_file_operations_huge :
1633 &shm_file_operations);
1634 err = PTR_ERR(file);
1641 sfd->id = shp->shm_perm.id;
1642 sfd->ns = get_ipc_ns(ns);
1645 file->private_data = sfd;
1647 err = security_mmap_file(file, prot, flags);
1651 if (mmap_write_lock_killable(current->mm)) {
1656 if (addr && !(shmflg & SHM_REMAP)) {
1658 if (addr + size < addr)
1661 if (find_vma_intersection(current->mm, addr, addr + size))
1665 addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
1668 if (IS_ERR_VALUE(addr))
1671 mmap_write_unlock(current->mm);
1673 mm_populate(addr, populate);
1679 down_write(&shm_ids(ns).rwsem);
1680 shp = shm_lock(ns, shmid);
1683 if (shm_may_destroy(shp))
1684 shm_destroy(ns, shp);
1687 up_write(&shm_ids(ns).rwsem);
1696 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1701 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1704 force_successful_syscall_return();
1708 #ifdef CONFIG_COMPAT
1710 #ifndef COMPAT_SHMLBA
1711 #define COMPAT_SHMLBA SHMLBA
1714 COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1719 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1722 force_successful_syscall_return();
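/*
 * Note on shmaddr handling in do_shmat() above (numbers illustrative):
 * with SHM_RND an unaligned address is rounded down to a multiple of
 * shmlba, e.g. addr &= ~(shmlba - 1) turns 0x12345 into 0x10000 for a
 * shmlba of 0x4000; without SHM_RND a misaligned address is rejected
 * (on most architectures only if it is not even page aligned).
 */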
1728 * detach and kill segment if marked destroyed.
1729 * The work is done in shm_close.
1731 long ksys_shmdt(char __user *shmaddr)
1733 struct mm_struct *mm = current->mm;
1734 struct vm_area_struct *vma;
1735 unsigned long addr = (unsigned long)shmaddr;
1736 int retval = -EINVAL;
1740 VMA_ITERATOR(vmi, mm, addr);
1743 if (addr & ~PAGE_MASK)
1746 if (mmap_write_lock_killable(mm))
1750 * This function tries to be smart and unmap shm segments that
1751 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   started at address shmaddr, records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
1757 * are within the initially determined size and that are from the
1758 * same shm segment from which we determined the size.
1759 * Errors from do_munmap are ignored: the function only fails if
1760 * it's called with invalid parameters or if it's called to unmap
1761 * a part of a vma. Both calls in this function are for full vmas,
1762 * the parameters are directly copied from the vma itself and always
1763 * valid - therefore do_munmap cannot fail. (famous last words?)
1766 * If it had been mremap()'d, the starting address would not
1767 * match the usual checks anyway. So assume all vma's are
1768 * above the starting address given.
1772 for_each_vma(vmi, vma) {
		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(); or
		 * otherwise it starts at this address with no hassles.
		 */
1778 if ((vma->vm_ops == &shm_vm_ops) &&
1779 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1782 * Record the file of the shm segment being
1783 * unmapped. With mremap(), someone could place
1784 * page from another segment but with equal offsets
1785 * in the range we are unmapping.
1787 file = vma->vm_file;
1788 size = i_size_read(file_inode(vma->vm_file));
1789 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1790 mas_pause(&vmi.mas);
1792 * We discovered the size of the shm segment, so
1793 * break out of here and fall through to the next
1794 * loop that uses the size information to stop
1795 * searching for matching vma's.
1798 vma = vma_next(&vmi);
1804 * We need look no further than the maximum address a fragment
1805 * could possibly have landed at. Also cast things to loff_t to
1806 * prevent overflows and make comparisons vs. equal-width types.
1808 size = PAGE_ALIGN(size);
1809 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1810 /* finding a matching vma now does not alter retval */
1811 if ((vma->vm_ops == &shm_vm_ops) &&
1812 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1813 (vma->vm_file == file)) {
1814 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1815 mas_pause(&vmi.mas);
1818 vma = vma_next(&vmi);
1821 #else /* CONFIG_MMU */
1822 vma = vma_lookup(mm, addr);
	/* under NOMMU conditions, the exact address to be destroyed must be given */
1826 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1827 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1833 mmap_write_unlock(mm);
1837 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1839 return ksys_shmdt(shmaddr);
1842 #ifdef CONFIG_PROC_FS
1843 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1845 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1846 struct user_namespace *user_ns = seq_user_ns(s);
1847 struct kern_ipc_perm *ipcp = it;
1848 struct shmid_kernel *shp;
1849 unsigned long rss = 0, swp = 0;
1851 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1852 shm_add_rss_swap(shp, &rss, &swp);
1854 #if BITS_PER_LONG <= 32
1855 #define SIZE_SPEC "%10lu"
1857 #define SIZE_SPEC "%21lu"
1861 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1862 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1863 SIZE_SPEC " " SIZE_SPEC "\n",
1868 pid_nr_ns(shp->shm_cprid, pid_ns),
1869 pid_nr_ns(shp->shm_lprid, pid_ns),
1871 from_kuid_munged(user_ns, shp->shm_perm.uid),
1872 from_kgid_munged(user_ns, shp->shm_perm.gid),
1873 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1874 from_kgid_munged(user_ns, shp->shm_perm.cgid),