/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
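
/*
 * Worked example (editor's illustration, not in the original file): on a
 * 64-bit machine sizeof(unsigned long) * 8 == 64, so
 * NLGRPSZ(32) == ALIGN(32, 64) / 8 == 64 / 8 == 8 bytes, and
 * NLGRPLONGS(32) == 8 / 8 == 1 long. On a 32-bit machine NLGRPSZ(32) is
 * 4 bytes and NLGRPLONGS(32) is again 1. Either way, the default 32-group
 * membership bitmap fits in a single unsigned long.
 */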
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*netlink_rcv)(struct sk_buff *skb);
	struct module		*module;
};
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
struct nl_pid_hash {
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		mask;
	unsigned int		shift;

	unsigned int		entries;
	unsigned int		max_shift;

	u32			rnd;
};
struct netlink_table {
	struct nl_pid_hash	hash;
	struct hlist_head	mc_list;
	unsigned long		*listeners;
	unsigned int		nl_nonroot;
	unsigned int		groups;
	struct mutex		*cb_mutex;
	struct module		*module;
	int			registered;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
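
/*
 * Editor's note (illustrative, not in the original file): netlink_table_grab()
 * is the writer side of this scheme; it waits until every
 * netlink_lock_table() user has called netlink_unlock_table(). A typical
 * table update therefore looks like:
 *
 *	netlink_table_grab();
 *	nl_table[protocol].registered = 1;	// mutate the table
 *	netlink_table_ungrab();
 *
 * while readers that only need a stable view bracket their access with
 * netlink_lock_table()/netlink_unlock_table() and may sleep in between.
 */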
static inline struct sock *netlink_lookup(struct net *net, int protocol,
					  u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			kfree(nl_table[sk->sk_protocol].listeners);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
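
/*
 * Editor's illustration (not part of the original file): the userspace
 * counterpart of netlink_bind(). A process subscribes to multicast group 1
 * of the (assumed) NETLINK_ROUTE family like this:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,		// 0: let the kernel autobind
 *		.nl_groups = 1,		// bitmask, bit 0 = group 1
 *	};
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *
 * With nl_pid == 0 the kernel path above falls through to
 * netlink_autobind(), which picks the thread group id (or a negative
 * rover value on collision) as the socket's pid.
 */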
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_pid != nlk_sk(ssk)->pid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		skb_set_owner_r(skb, sk);
		nlk->netlink_rcv(skb);
	}
	kfree_skb(skb);
	sock_put(sk);
	return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
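
/*
 * Editor's illustration (assumptions: a kernel socket "my_sk" created with
 * netlink_kernel_create() and a destination pid taken from a received
 * request):
 *
 *	struct sk_buff *skb = nlmsg_new(payload_len, GFP_KERNEL);
 *	...fill in the nlmsghdr and payload...
 *	netlink_unicast(my_sk, skb, dst_pid, MSG_DONTWAIT);
 *
 * netlink_unicast() consumes the skb on both success and failure, so the
 * caller must not free it afterwards.
 */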
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
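
/*
 * Editor's illustration: the usual reason to call netlink_has_listeners()
 * is to skip building an expensive notification when nobody subscribed.
 * A hypothetical event source might do:
 *
 *	if (netlink_has_listeners(my_sk, MY_MCAST_GROUP)) {
 *		skb = build_event_skb();	// assumed helper
 *		netlink_broadcast(my_sk, skb, 0, MY_MCAST_GROUP, GFP_KERNEL);
 *	}
 *
 * The listener bitmap is read under RCU, so the answer is only a hint; a
 * subscriber may appear or vanish immediately afterwards.
 */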
static inline int netlink_broadcast_deliver(struct sock *sk,
					    struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 pid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	kfree_skb(info.skb2);

	if (info.delivery_failure)
		return -ENOBUFS;

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (sock_net(sk) != sock_net(p->exclude_sk))
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @pid: the PID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 */
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
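
/*
 * Editor's illustration (userspace side, fd assumed already bound): a
 * receiver opts in with setsockopt() and then reads the group out of the
 * control message that netlink_cmsg_recv_pktinfo() attaches:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_PKTINFO, &one, sizeof(one));
 *	...
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_PKTINFO) {
 *			struct nl_pktinfo *pi = (void *)CMSG_DATA(cmsg);
 *			// pi->group is the multicast group this skb went to
 *		}
 */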
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
	NETLINK_CB(skb).sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */
struct sock *
netlink_kernel_create(struct net *net, int unit, unsigned int groups,
		      void (*input)(struct sk_buff *skb),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and the move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->netlink_rcv = input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		nl_table[unit].listeners = listeners;
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(netlink_kernel_create);
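
/*
 * Editor's illustration: a minimal consumer of netlink_kernel_create().
 * The unit NETLINK_USERSOCK and the demo_* names are assumptions made up
 * for this sketch:
 *
 *	static struct sock *demo_sk;
 *
 *	static void demo_input(struct sk_buff *skb)
 *	{
 *		printk(KERN_INFO "demo: %u byte datagram\n", skb->len);
 *	}
 *
 *	static int __init demo_init(void)
 *	{
 *		demo_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
 *						0, demo_input, NULL,
 *						THIS_MODULE);
 *		return demo_sk ? 0 : -ENOMEM;
 *	}
 *
 * This pairs with netlink_kernel_release(demo_sk) at module exit.
 */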
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	int err = 0;

	if (groups < 32)
		groups = 32;

	netlink_table_grab();
	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
		if (!listeners) {
			err = -ENOMEM;
			goto out_ungrab;
		}
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
	}
	tbl->groups = groups;

out_ungrab:
	netlink_table_ungrab();
	synchronize_rcu();
	kfree(old);
	return err;
}
EXPORT_SYMBOL(netlink_change_ngroups);
/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	netlink_table_grab();

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

	netlink_table_ungrab();
}
EXPORT_SYMBOL(netlink_clear_multicast_users);
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
EXPORT_SYMBOL(netlink_set_nonroot);
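
/*
 * Editor's illustration: families that want unprivileged users to receive
 * (or send) multicasts opt in at init time. Generic netlink, for example,
 * relaxes the receive check roughly like this (sketch, not a quote):
 *
 *	netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
 *
 * After that, netlink_capable() lets any user pass the NL_NONROOT_RECV
 * test for that protocol, so binding to multicast groups no longer
 * requires CAP_NET_ADMIN.
 */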
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else {
			skb_queue_tail(&sk->sk_receive_queue, skb);
			sk->sk_data_ready(sk, skb->len);
		}
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else {
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
	}

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb,
				   struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(netlink_dump_start);
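
/*
 * Editor's illustration: a request handler typically kicks off a dump and
 * propagates the -EINTR convention back to netlink_rcv_skb(). Sketch with
 * assumed family-specific names:
 *
 *	static int demo_getfoo(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (nlh->nlmsg_flags & NLM_F_DUMP)
 *			return netlink_dump_start(demo_sk, skb, nlh,
 *						  demo_dump_foo, NULL);
 *		...handle the non-dump case...
 *	}
 *
 * demo_dump_foo() is then called repeatedly from netlink_dump(); it
 * returns skb->len while it still has data and 0 when the dump is done.
 */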
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
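
/*
 * Editor's illustration: a per-message handler as consumed by
 * netlink_rcv_skb(). Returning 0 lets the NLM_F_ACK logic above answer the
 * sender; a negative errno makes netlink_ack() report it; -EINTR (from
 * netlink_dump_start()) suppresses the ack entirely. demo_handle_msg and
 * DEMO_CMD_GET are assumed names:
 *
 *	static int demo_handle_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		case DEMO_CMD_GET:
 *			return demo_getfoo(skb, nlh);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */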
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, pid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "netlink");
#endif
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		limit = num_physpages >> (21 - PAGE_SHIFT);
	else
		limit = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);