#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
 *	Our network namespace constructor/destructor lists
 */
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
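/*
 * Illustrative sketch (not part of the original file): the lockless
 * reader that the synchronisation notes above pair with looks roughly
 * like net_generic() in <net/netns/generic.h>:
 *
 *	rcu_read_lock();
 *	ng = rcu_dereference(net->gen);
 *	ptr = ng->ptr[id];
 *	rcu_read_unlock();
 */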
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}
/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);
/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}
void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
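/*
 * Illustrative sketch (not part of the original file): userspace reaches
 * this path via clone(2) or unshare(2) with CLONE_NEWNET set (which
 * requires CAP_SYS_ADMIN in the owning user namespace), e.g.:
 *
 *	if (unshare(CLONE_NEWNET) == -1)
 *		perror("unshare");
 */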
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);	/* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
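/*
 * Illustrative sketch (not part of the original file): a suitable fd is
 * obtained by opening a namespace file, e.g.:
 *
 *	int fd = open("/proc/self/ns/net", O_RDONLY);
 *
 * and is then passed down to get_net_ns_by_fd(), for instance via the
 * NETNSA_FD netlink attribute handled further below.
 */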
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};
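/*
 * Note (added for clarity): userspace reaches the handlers below over
 * rtnetlink; for example, iproute2's "ip netns set NAME NETNSID" sends
 * an RTM_NEWNSID request carrying NETNSA_FD (or NETNSA_PID) to identify
 * the peer plus the requested NETNSA_NSID.
 */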
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}
static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
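/*
 * Note (added for clarity): the resulting message is minimal: an
 * rtgenmsg header (family AF_UNSPEC) followed by a single NETNSA_NSID
 * attribute, which is exactly what rtnl_net_get_size() accounts for.
 */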
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}
struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}
#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}
/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops: pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
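/*
 * Illustrative sketch (not part of the original file): a subsystem with
 * per-namespace state, here a hypothetical "foo" module, registers
 * itself roughly like this. Setting .id and .size makes ops_init()
 * allocate the zeroed state and publish it via net_assign_generic()
 * before .init runs:
 *
 *	struct foo_net { int counter; };
 *	static unsigned int foo_net_id;
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->counter = 0;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	register_pernet_subsys(&foo_net_ops);
 */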
/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
 *	register_pernet_device - register a network namespace device
 *	@ops: pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
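/*
 * Note (added for clarity): the usage pattern mirrors the
 * register_pernet_subsys() sketch above; device registrations are simply
 * kept after first_device so their init methods run after all subsystems
 * and their exit methods run before them.
 */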
/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}
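/*
 * Illustrative sketch (not part of the original file): netns_install()
 * runs when userspace calls setns(2) on a netns fd; the path below
 * follows the iproute2 "ip netns" bind-mount convention:
 *
 *	int fd = open("/var/run/netns/blue", O_RDONLY);
 *	if (setns(fd, CLONE_NEWNET) == -1)
 *		perror("setns");
 */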
static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif