net/ipv4/tunnel4.c
/* tunnel4.c: Generic IP tunnel transformer.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/xfrm.h>

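/*
 * Handlers live on two singly linked, RCU-protected lists kept in ascending
 * priority order: tunnel4_handlers for IPv4-in-IPv4 and tunnel64_handlers
 * for IPv6-in-IPv4.  Writers serialize on tunnel4_mutex; readers traverse
 * the lists with rcu_dereference() via for_each_tunnel_rcu().
 */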
static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);

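/* Select the handler list for a tunnel family: AF_INET maps to the
 * IPv4-in-IPv4 list, anything else (AF_INET6) to the IPv6-in-IPv4 list.
 */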
static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
	return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
}

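/**
 * xfrm4_tunnel_register - register a tunnel decapsulation handler
 * @handler: handler to add, with ->handler, ->err_handler and ->priority set
 * @family: AF_INET for IPv4-in-IPv4, AF_INET6 for IPv6-in-IPv4
 *
 * Inserts @handler into the per-family list in ascending priority order
 * under tunnel4_mutex.  Returns 0 on success or -EEXIST if a handler with
 * the same priority is already registered.
 *
 * Illustrative usage (the ipip_* names below are a sketch of a typical
 * caller, not part of this file):
 *
 *	static struct xfrm_tunnel ipip_handler __read_mostly = {
 *		.handler	= ipip_rcv,
 *		.err_handler	= ipip_err,
 *		.priority	= 1,
 *	};
 *
 *	if (xfrm4_tunnel_register(&ipip_handler, AF_INET))
 *		pr_info("ipip init: can't register tunnel\n");
 */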
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel __rcu **pprev;
	struct xfrm_tunnel *t;
	int ret = -EEXIST;
	int priority = handler->priority;

	mutex_lock(&tunnel4_mutex);

	for (pprev = fam_handlers(family);
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&tunnel4_mutex))) != NULL;
	     pprev = &t->next) {
		if (t->priority > priority)
			break;
		if (t->priority == priority)
			goto err;
	}

	handler->next = *pprev;
	rcu_assign_pointer(*pprev, handler);

	ret = 0;

err:
	mutex_unlock(&tunnel4_mutex);

	return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_register);

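/**
 * xfrm4_tunnel_deregister - remove a previously registered tunnel handler
 * @handler: handler to remove
 * @family: family it was registered under
 *
 * Unlinks @handler under tunnel4_mutex and returns 0, or -ENOENT if it was
 * not found.  synchronize_net() guarantees that no RCU reader still sees
 * the handler once this function returns.
 */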
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel __rcu **pprev;
	struct xfrm_tunnel *t;
	int ret = -ENOENT;

	mutex_lock(&tunnel4_mutex);

	for (pprev = fam_handlers(family);
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&tunnel4_mutex))) != NULL;
	     pprev = &t->next) {
		if (t == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel4_mutex);

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_deregister);

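/* Walk an RCU-protected handler list; the caller must be in an
 * rcu_read_lock() section.
 */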
#define for_each_tunnel_rcu(head, handler)		\
	for (handler = rcu_dereference(head);		\
	     handler != NULL;				\
	     handler = rcu_dereference(handler->next))

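/* Receive an IPv4-in-IPv4 packet: offer it to each registered handler in
 * priority order.  A handler that consumes the skb returns 0 and the walk
 * stops.  If nobody claims the packet, reply with ICMP port unreachable
 * and drop it.
 */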
static int tunnel4_rcv(struct sk_buff *skb)
{
	struct xfrm_tunnel *handler;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;

	for_each_tunnel_rcu(tunnel4_handlers, handler)
		if (!handler->handler(skb))
			return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}

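/* As above, but for IPv6-in-IPv4 (protocol 41) packets; only built when
 * IPv6 support is enabled.
 */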
#if IS_ENABLED(CONFIG_IPV6)
static int tunnel64_rcv(struct sk_buff *skb)
{
	struct xfrm_tunnel *handler;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	for_each_tunnel_rcu(tunnel64_handlers, handler)
		if (!handler->handler(skb))
			return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
#endif

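/* Dispatch an ICMP error for an IPv4-in-IPv4 tunnel to the first handler
 * whose err_handler claims it (returns 0).
 */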
static void tunnel4_err(struct sk_buff *skb, u32 info)
{
	struct xfrm_tunnel *handler;

	for_each_tunnel_rcu(tunnel4_handlers, handler)
		if (!handler->err_handler(skb, info))
			break;
}

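/* IPv6-in-IPv4 counterpart of tunnel4_err(). */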
#if IS_ENABLED(CONFIG_IPV6)
static void tunnel64_err(struct sk_buff *skb, u32 info)
{
	struct xfrm_tunnel *handler;

	for_each_tunnel_rcu(tunnel64_handlers, handler)
		if (!handler->err_handler(skb, info))
			break;
}
#endif

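/* Protocol hooks registered with the IPv4 stack.  no_policy skips the
 * generic xfrm policy check on input (the tunnel handlers do their own),
 * and netns_ok allows delivery in non-init network namespaces.
 */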
static const struct net_protocol tunnel4_protocol = {
	.handler	=	tunnel4_rcv,
	.err_handler	=	tunnel4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

#if IS_ENABLED(CONFIG_IPV6)
static const struct net_protocol tunnel64_protocol = {
	.handler	=	tunnel64_rcv,
	.err_handler	=	tunnel64_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};
#endif

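/* Hook the handlers into the IPv4 stack: protocol 4 (IPPROTO_IPIP) for
 * IPv4-in-IPv4 and, when IPv6 is enabled, protocol 41 (IPPROTO_IPV6) for
 * IPv6-in-IPv4.
 */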
static int __init tunnel4_init(void)
{
	if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) {
		pr_err("%s: can't add protocol\n", __func__);
		return -EAGAIN;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
		pr_err("tunnel64 init: can't add protocol\n");
		inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
		return -EAGAIN;
	}
#endif
	return 0;
}

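/* Unhook the protocol handlers in the reverse order of registration. */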
static void __exit tunnel4_fini(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
		pr_err("tunnel64 close: can't remove protocol\n");
#endif
	if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
		pr_err("tunnel4 close: can't remove protocol\n");
}

module_init(tunnel4_init);
module_exit(tunnel4_fini);
MODULE_LICENSE("GPL");